Commit 4ecb39a

initial commit -- just videos and some scripts from the original repo

Files changed:
- .gitattributes +10 -0
- README.md +339 -0
- data/a.npz +3 -0
- raw_data/videos/atanasie_DJI_0652_full/atanasie_DJI_0652_full_540p.mp4 +3 -0
- raw_data/videos/barsana_DJI_0500_0501_combined_sliced_2700_14700/barsana_DJI_0500_0501_combined_sliced_2700_14700_540p.mp4 +3 -0
- raw_data/videos/comana_DJI_0881_full/comana_DJI_0881_full_540p.mp4 +3 -0
- raw_data/videos/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110_540p.mp4 +3 -0
- raw_data/videos/herculane_DJI_0021_full/herculane_DJI_0021_full_540p.mp4 +3 -0
- raw_data/videos/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715_540p.mp4 +3 -0
- raw_data/videos/norway_210821_DJI_0015_full/norway_210821_DJI_0015_full_540p.mp4 +3 -0
- raw_data/videos/olanesti_DJI_0416_full/olanesti_DJI_0416_full_540p.mp4 +3 -0
- raw_data/videos/petrova_DJI_0525_0526_combined_sliced_2850_11850/petrova_DJI_0525_0526_combined_sliced_2850_11850_540p.mp4 +3 -0
- raw_data/videos/slanic_DJI_0956_0957_combined_sliced_780_9780/slanic_DJI_0956_0957_combined_sliced_780_9780_540p.mp4 +3 -0
- scripts/cfg.yaml +107 -0
- scripts/collage_slanic_DJI_0956_0957_combined_sliced_780_9780_3522.npz.png +3 -0
- scripts/convert_m2f_to_dronescapes.py +78 -0
- scripts/count_npz.sh +18 -0
- scripts/dronescapes_viewer.ipynb +0 -0
- scripts/dronescapes_viewer.py +41 -0
- scripts/eval_script_old.py +181 -0
- scripts/evaluate_semantic_segmentation.py +130 -0
- scripts/semantic_mapper.ipynb +0 -0
- scripts/symlinks_from_txt_list.py +124 -0
.gitattributes
ADDED
@@ -0,0 +1,10 @@
*.jpg filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
data/ filter=lfs diff=lfs merge=lfs -text
raw_data/ filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.tar.gz filter=lfs diff=lfs merge=lfs -text
*.MP4 filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,339 @@
# Dronescapes dataset

As introduced in our ICCV 2023 workshop paper: [link](https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Marcu_Self-Supervised_Hypergraphs_for_Learning_Multiple_World_Interpretations_ICCVW_2023_paper.pdf)



# 1. Downloading the data

## Option 1. Download the pre-processed dataset from the HuggingFace repository

```
git lfs install # Make sure you have git-lfs installed (https://git-lfs.com)
git clone https://huggingface.co/datasets/Meehai/dronescapes
```

Note: the dataset is about 500 GB, so it may take a while to clone.

<details>
<summary> <b> Option 2. Generating the dataset from raw videos and basic labels </b></summary>

Recommended if you want to understand how the dataset was created or to add new videos or representations.

### 1.2.1 Raw videos

Follow the commands in each directory under `raw_data/videos/*/commands.txt` if you want to start from the 4K videos.

If you only want the 540p videos as used in the paper, they are already provided in the `raw_data/videos/*` directories.

### 1.2.2 Semantic segmentation labels (human annotated)

These were human annotated and then propagated using [segprop](https://github.com/vlicaret/segprop).

```bash
cd raw_data/
tar -xzvf segprop_npz_540.tar.gz
```

### 1.2.3 Generate the rest of the representations

We use the [video-representations-extractor](https://gitlab.com/meehai/video-representations-extractor) (VRE) to generate the rest of the labels using pre-trained networks or algorithms.

Install it via `pip install video-representations-extractor` (or follow the README over there for a docker or local-environment setup).

```
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=0 vre raw_data/videos/atanasie_DJI_0652_full/atanasie_DJI_0652_full_540p.mp4 -o raw_data/npz_540p/atanasie_DJI_0652_full/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=1 vre raw_data/videos/barsana_DJI_0500_0501_combined_sliced_2700_14700/barsana_DJI_0500_0501_combined_sliced_2700_14700_540p.mp4 -o raw_data/npz_540p/barsana_DJI_0500_0501_combined_sliced_2700_14700/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=2 vre raw_data/videos/comana_DJI_0881_full/comana_DJI_0881_full_540p.mp4 -o raw_data/npz_540p/comana_DJI_0881_full/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=3 vre raw_data/videos/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110_540p.mp4 -o raw_data/npz_540p/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=4 vre raw_data/videos/herculane_DJI_0021_full/herculane_DJI_0021_full_540p.mp4 -o raw_data/npz_540p/herculane_DJI_0021_full/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=5 vre raw_data/videos/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715_540p.mp4 -o raw_data/npz_540p/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=6 vre raw_data/videos/norway_210821_DJI_0015_full/norway_210821_DJI_0015_full_540p.mp4 -o raw_data/npz_540p/norway_210821_DJI_0015_full/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=7 vre raw_data/videos/olanesti_DJI_0416_full/olanesti_DJI_0416_full_540p.mp4 -o raw_data/npz_540p/olanesti_DJI_0416_full/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=0 vre raw_data/videos/petrova_DJI_0525_0526_combined_sliced_2850_11850/petrova_DJI_0525_0526_combined_sliced_2850_11850_540p.mp4 -o raw_data/npz_540p/petrova_DJI_0525_0526_combined_sliced_2850_11850/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES=1 vre raw_data/videos/slanic_DJI_0956_0957_combined_sliced_780_9780/slanic_DJI_0956_0957_combined_sliced_780_9780_540p.mp4 -o raw_data/npz_540p/slanic_DJI_0956_0957_combined_sliced_780_9780/ --cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 --output_dir_exist_mode overwrite
```
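
The ten commands above differ only in the scene name and the GPU index, so a small sketch that regenerates them, assuming the `raw_data/videos/<scene>/<scene>_540p.mp4` layout used throughout this README:

```python
from pathlib import Path

# regenerate the per-scene VRE commands, cycling over 8 GPUs as above
scenes = sorted(p.name for p in Path("raw_data/videos").iterdir() if p.is_dir())
for gpu, scene in enumerate(scenes):
    print(f"VRE_DEVICE=cuda CUDA_VISIBLE_DEVICES={gpu % 8} "
          f"vre raw_data/videos/{scene}/{scene}_540p.mp4 -o raw_data/npz_540p/{scene}/ "
          f"--cfg_path scripts/cfg.yaml --batch_size 3 --n_threads_data_storer 4 "
          f"--output_dir_exist_mode overwrite")
```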

Note: `depth_sfm`, `normals_sfm` and `depth_ufo` are not available in VRE. Contact us for more information about them.

Note: add `--representations "rgb" "opticalflow_rife" "depth_dpt" "edges_dexined" "semantic_mask2former_swin_mapillary" "softseg_gb"` if you only want a subset of the representations.

Note: some batch sizes are overwritten in the config itself.

### 1.2.4 Convert Mask2Former from Mapillary classes to segprop8 classes

Since we use a pre-trained Mask2Former, which outputs either Mapillary or COCO panoptic classes, we need to convert its predictions to the 8 dronescapes-compatible classes.

To do this, we use the `scripts/convert_m2f_to_dronescapes.py` script:

```
python scripts/convert_m2f_to_dronescapes.py in_dir out_dir mapillary/coco [--overwrite]
```

```
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/atanasie_DJI_0652_full/semantic_mask2former_swin_mapillary raw_data/npz_540p/atanasie_DJI_0652_full/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/barsana_DJI_0500_0501_combined_sliced_2700_14700/semantic_mask2former_swin_mapillary raw_data/npz_540p/barsana_DJI_0500_0501_combined_sliced_2700_14700/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/comana_DJI_0881_full/semantic_mask2former_swin_mapillary raw_data/npz_540p/comana_DJI_0881_full/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110/semantic_mask2former_swin_mapillary raw_data/npz_540p/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/herculane_DJI_0021_full/semantic_mask2former_swin_mapillary raw_data/npz_540p/herculane_DJI_0021_full/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715/semantic_mask2former_swin_mapillary raw_data/npz_540p/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/norway_210821_DJI_0015_full/semantic_mask2former_swin_mapillary raw_data/npz_540p/norway_210821_DJI_0015_full/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/olanesti_DJI_0416_full/semantic_mask2former_swin_mapillary raw_data/npz_540p/olanesti_DJI_0416_full/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/petrova_DJI_0525_0526_combined_sliced_2850_11850/semantic_mask2former_swin_mapillary raw_data/npz_540p/petrova_DJI_0525_0526_combined_sliced_2850_11850/semantic_mask2former_swin_mapillary_converted mapillary
python scripts/convert_m2f_to_dronescapes.py raw_data/npz_540p/slanic_DJI_0956_0957_combined_sliced_780_9780/semantic_mask2former_swin_mapillary raw_data/npz_540p/slanic_DJI_0956_0957_combined_sliced_780_9780/semantic_mask2former_swin_mapillary_converted mapillary
```

### 1.2.5 Check counts for consistency

Run `bash scripts/count_npz.sh raw_data/npz_540p`. At this point it should return:

| scene     |   rgb |   depth_dpt |   depth_sfm_manual20.. |   edges_dexined |   normals_sfm_manual.. |   opticalflow_rife |   semantic_mask2form.. |   semantic_segprop8 |
|:----------|------:|------------:|-----------------------:|----------------:|-----------------------:|-------------------:|-----------------------:|--------------------:|
| atanasie  |  9021 |        9021 |                    9020 |            9021 |                    9020 |               9021 |                    9021 |                9001 |
| barsana   | 12001 |       12001 |                   12001 |           12001 |                   12001 |              12000 |                   12001 |                1573 |
| comana    |  9022 |        9022 |                       0 |            9022 |                       0 |               9022 |                    9022 |                1210 |
| gradistei |  9601 |        9601 |                    9600 |            9601 |                    9600 |               9600 |                    9601 |                1210 |
| herculane |  9022 |        9022 |                    9021 |            9022 |                    9021 |               9022 |                    9022 |                1210 |
| jupiter   | 11066 |       11066 |                   11065 |           11066 |                   11065 |              11066 |                   11066 |                1452 |
| norway    |  2983 |        2983 |                       0 |            2983 |                       0 |               2983 |                    2983 |                2941 |
| olanesti  |  9022 |        9022 |                    9021 |            9022 |                    9021 |               9022 |                    9022 |                1210 |
| petrova   |  9001 |        9001 |                    9001 |            9001 |                    9001 |               9000 |                    9001 |                1210 |
| slanic    |  9001 |        9001 |                    9001 |            9001 |                    9001 |               9000 |                    9001 |                9001 |

### 1.2.6. Split into train, validation, semisupervised and test

We include 8 splits: 4 using only the GT annotated semantic data and 4 using all available data (i.e. segprop-propagated between annotated frames). The indexes are taken from `txt_files/*`; for example, `txt_files/manually_annotated_files/test_files_116.txt` refers to the fact that the (unseen at train time) test set (norway + petrova + barsana) contains 116 manually annotated semantic files. We include all representations from above, not just the semantic ones, for all possible splits. Adding new representations is as simple as running VRE on the 540p mp4 file.

```
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/annotated_and_segprop/train_files_11664.txt -o data/train_set --overwrite
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/annotated_and_segprop/val_files_605.txt -o data/validation_set --overwrite
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/annotated_and_segprop/semisup_files_11299.txt -o data/semisupervised_set --overwrite
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/annotated_and_segprop/test_files_5603.txt -o data/test_set --overwrite
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/manually_annotated_files/train_files_218.txt -o data/train_set_annotated_only --overwrite
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/manually_annotated_files/val_files_15.txt -o data/validation_set_annotated_only --overwrite
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/manually_annotated_files/semisup_files_207.txt -o data/semisupervised_set_annotated_only --overwrite
python scripts/symlinks_from_txt_list.py raw_data/npz_540p/ --txt_file txt_files/manually_annotated_files/test_files_116.txt -o data/test_set_annotated_only --overwrite
```
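
For intuition, a minimal sketch of the assumed behavior of `scripts/symlinks_from_txt_list.py` (the real script is included in this commit; the exact frame-naming details here are an assumption):

```python
# assumed: every txt line is "scene_name/frame_index"; for each representation of
# that scene we link the frame's .npz into out_dir/<representation>/, prefixing
# the scene name so frames from different scenes do not collide
from pathlib import Path

def make_split(npz_root: Path, txt_file: Path, out_dir: Path) -> None:
    for entry in txt_file.read_text().splitlines():
        scene, frame = entry.split("/")
        for repr_dir in sorted((npz_root / scene).iterdir()):  # one dir per representation
            src = repr_dir / f"{frame}.npz"
            if src.exists():
                dst = out_dir / repr_dir.name / f"{scene}_{frame}.npz"
                dst.parent.mkdir(parents=True, exist_ok=True)
                dst.symlink_to(src.resolve())
```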

Note: add `--copy_files` if you want to make copies instead of symlinks.

Upon calling this, you should see something like this:
```
user> ls data/*
data/semisupervised_set:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8

data/semisupervised_set_annotated_only:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8

data/test_set:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8

data/test_set_annotated_only:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8

data/train_set:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8

data/train_set_annotated_only:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8

data/validation_set:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8

data/validation_set_annotated_only:
depth_dpt               edges_dexined             opticalflow_rife  semantic_mask2former_swin_mapillary_converted
depth_sfm_manual202204  normals_sfm_manual202204  rgb               semantic_segprop8
```

### 1.2.7 Convert Camera Normals to World Normals

This is an optional step: for some use cases it may be better to use world normals instead of the camera normals provided by default in `normals_sfm_manual202204`. To convert, we provide camera rotation matrices in `raw_data/camera_matrics.tar.gz` for all 8 scenes that also have SfM.

In order to convert, use this function (for each npz file):

```python
import numpy as np

def convert_camera_to_world(normals: np.ndarray, rotation_matrix: np.ndarray) -> np.ndarray:
    camera_normals = (normals.copy() - 0.5) * 2  # [0:1] -> [-1:1]
    world_normals = camera_normals @ np.linalg.inv(rotation_matrix)
    world_normals = (world_normals / 2) + 0.5  # [-1:1] -> [0:1]
    return np.clip(world_normals, 0.0, 1.0)
```
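
A hypothetical per-scene driver for this function; the `arr_0` key matches how the other scripts in this commit read npz files, while the `camera_matrices` path and its one-matrix-per-frame layout are purely assumptions for illustration:

```python
from pathlib import Path
import numpy as np

scene = "slanic_DJI_0956_0957_combined_sliced_780_9780"
normals_dir = Path(f"raw_data/npz_540p/{scene}/normals_sfm_manual202204")
rotations = np.load(f"raw_data/camera_matrices/{scene}.npy")  # hypothetical: (N, 3, 3), one per frame

out_dir = normals_dir.parent / "normals_world"
out_dir.mkdir(exist_ok=True)
for ix, npz_file in enumerate(sorted(normals_dir.glob("*.npz"))):
    camera_normals = np.load(npz_file)["arr_0"]  # (H, W, 3) in [0:1]
    world_normals = convert_camera_to_world(camera_normals, rotations[ix])
    np.savez(out_dir / npz_file.name, world_normals)
```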

</details>

## 2. Using the data

As per the split from the paper:

<details>
<summary> Split </summary>
<img src="split.png">
</details>

The data is in `data/*` (see the `ls` call above; it should match even if you download from HuggingFace).

## 2.1 Using the provided viewer



The simplest way to explore the data is to use the [provided notebook](scripts/dronescapes_viewer.ipynb). Upon running it, you should get a collage with all the default tasks, like the picture at the top.

For a CLI-only method, you can use the provided reader as well:

```
python scripts/dronescapes_viewer.py data/test_set_annotated_only/ # or any of the 8 directories in data/
```

<details>
<summary> Expected output </summary>

```
[MultiTaskDataset]
- Path: '/export/home/proiecte/aux/mihai_cristian.pirvu/datasets/dronescapes/data/test_set_annotated_only'
- Tasks (11): [DepthRepresentation(depth_dpt), DepthRepresentation(depth_sfm_manual202204), DepthRepresentation(depth_ufo), ColorRepresentation(edges_dexined), EdgesRepresentation(edges_gb), NpzRepresentation(normals_sfm_manual202204), OpticalFlowRepresentation(opticalflow_rife), ColorRepresentation(rgb), SemanticRepresentation(semantic_mask2former_swin_mapillary_converted), SemanticRepresentation(semantic_segprop8), ColorRepresentation(softseg_gb)]
- Length: 116
- Handle missing data mode: 'fill_none'
== Shapes ==
{'depth_dpt': torch.Size([540, 960]),
 'depth_sfm_manual202204': torch.Size([540, 960]),
 'depth_ufo': torch.Size([540, 960, 1]),
 'edges_dexined': torch.Size([540, 960]),
 'edges_gb': torch.Size([540, 960, 1]),
 'normals_sfm_manual202204': torch.Size([540, 960, 3]),
 'opticalflow_rife': torch.Size([540, 960, 2]),
 'rgb': torch.Size([540, 960, 3]),
 'semantic_mask2former_swin_mapillary_converted': torch.Size([540, 960, 8]),
 'semantic_segprop8': torch.Size([540, 960, 8]),
 'softseg_gb': torch.Size([540, 960, 3])}
== Random loaded item ==
{'depth_dpt': tensor[540, 960] n=518400 (2.0Mb) x∈[0.043, 1.000] μ=0.341 σ=0.418,
 'depth_sfm_manual202204': None,
 'depth_ufo': tensor[540, 960, 1] n=518400 (2.0Mb) x∈[0.115, 0.588] μ=0.297 σ=0.138,
 'edges_dexined': tensor[540, 960] n=518400 (2.0Mb) x∈[0.000, 0.004] μ=0.003 σ=0.001,
 'edges_gb': tensor[540, 960, 1] n=518400 (2.0Mb) x∈[0., 1.000] μ=0.063 σ=0.100,
 'normals_sfm_manual202204': None,
 'opticalflow_rife': tensor[540, 960, 2] n=1036800 (4.0Mb) x∈[-0.004, 0.005] μ=0.000 σ=0.000,
 'rgb': tensor[540, 960, 3] n=1555200 (5.9Mb) x∈[0., 1.000] μ=0.392 σ=0.238,
 'semantic_mask2former_swin_mapillary_converted': tensor[540, 960, 8] n=4147200 (16Mb) x∈[0., 1.000] μ=0.125 σ=0.331,
 'semantic_segprop8': tensor[540, 960, 8] n=4147200 (16Mb) x∈[0., 1.000] μ=0.125 σ=0.331,
 'softseg_gb': tensor[540, 960, 3] n=1555200 (5.9Mb) x∈[0., 0.004] μ=0.002 σ=0.001}
== Random loaded batch ==
{'depth_dpt': tensor[5, 540, 960] n=2592000 (9.9Mb) x∈[0.043, 1.000] μ=0.340 σ=0.417,
 'depth_sfm_manual202204': tensor[5, 540, 960] n=2592000 (9.9Mb) NaN!,
 'depth_ufo': tensor[5, 540, 960, 1] n=2592000 (9.9Mb) x∈[0.115, 0.588] μ=0.296 σ=0.137,
 'edges_dexined': tensor[5, 540, 960] n=2592000 (9.9Mb) x∈[0.000, 0.004] μ=0.003 σ=0.001,
 'edges_gb': tensor[5, 540, 960, 1] n=2592000 (9.9Mb) x∈[0., 1.000] μ=0.063 σ=0.102,
 'normals_sfm_manual202204': tensor[5, 540, 960, 3] n=7776000 (30Mb) NaN!,
 'opticalflow_rife': tensor[5, 540, 960, 2] n=5184000 (20Mb) x∈[-0.004, 0.006] μ=0.000 σ=0.000,
 'rgb': tensor[5, 540, 960, 3] n=7776000 (30Mb) x∈[0., 1.000] μ=0.393 σ=0.238,
 'semantic_mask2former_swin_mapillary_converted': tensor[5, 540, 960, 8] n=20736000 (79Mb) x∈[0., 1.000] μ=0.125 σ=0.331,
 'semantic_segprop8': tensor[5, 540, 960, 8] n=20736000 (79Mb) x∈[0., 1.000] μ=0.125 σ=0.331,
 'softseg_gb': tensor[5, 540, 960, 3] n=7776000 (30Mb) x∈[0., 0.004] μ=0.002 σ=0.001}
== Random loaded batch using torch DataLoader ==
{'depth_dpt': tensor[5, 540, 960] n=2592000 (9.9Mb) x∈[0.025, 1.000] μ=0.216 σ=0.343,
 'depth_sfm_manual202204': tensor[5, 540, 960] n=2592000 (9.9Mb) x∈[0., 1.000] μ=0.562 σ=0.335 NaN!,
 'depth_ufo': tensor[5, 540, 960, 1] n=2592000 (9.9Mb) x∈[0.100, 0.580] μ=0.290 σ=0.128,
 'edges_dexined': tensor[5, 540, 960] n=2592000 (9.9Mb) x∈[0.000, 0.004] μ=0.003 σ=0.001,
 'edges_gb': tensor[5, 540, 960, 1] n=2592000 (9.9Mb) x∈[0., 1.000] μ=0.079 σ=0.116,
 'normals_sfm_manual202204': tensor[5, 540, 960, 3] n=7776000 (30Mb) x∈[0.000, 1.000] μ=0.552 σ=0.253 NaN!,
 'opticalflow_rife': tensor[5, 540, 960, 2] n=5184000 (20Mb) x∈[-0.013, 0.016] μ=0.000 σ=0.004,
 'rgb': tensor[5, 540, 960, 3] n=7776000 (30Mb) x∈[0., 1.000] μ=0.338 σ=0.237,
 'semantic_mask2former_swin_mapillary_converted': tensor[5, 540, 960, 8] n=20736000 (79Mb) x∈[0., 1.000] μ=0.125 σ=0.331,
 'semantic_segprop8': tensor[5, 540, 960, 8] n=20736000 (79Mb) x∈[0., 1.000] μ=0.125 σ=0.331,
 'softseg_gb': tensor[5, 540, 960, 3] n=7776000 (30Mb) x∈[0., 0.004] μ=0.002 σ=0.001}
```
</details>
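
To consume the data programmatically instead of via the viewer, a minimal sketch following `scripts/dronescapes_viewer.py` (included in this commit; `data/train_set` is just an example split):

```python
# mirrors scripts/dronescapes_viewer.py; run from the repository root so that
# the dronescapes_reader package is importable
from dronescapes_reader import MultiTaskDataset
from dronescapes_reader.dronescapes_representations import dronescapes_task_types
from torch.utils.data import DataLoader

reader = MultiTaskDataset("data/train_set", task_names=list(dronescapes_task_types.keys()),
                          task_types=dronescapes_task_types, handle_missing_data="fill_nan",
                          normalization="min_max", cache_task_stats=True)
loader = DataLoader(reader, collate_fn=reader.collate_fn, batch_size=5, shuffle=True)
batch_data, names, repr_names = next(iter(loader))  # dict of tensors, one entry per task
```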

## 3. Evaluation for semantic segmentation

In the paper we evaluate on the 3 test scenes (unseen at train time) as well as on the semi-supervised scenes (seen, but a different split), against the human-annotated frames. The general evaluation script is `scripts/evaluate_semantic_segmentation.py`.

General usage is:
```
python scripts/evaluate_semantic_segmentation.py y_dir gt_dir -o results.csv --classes C1 C2 .. Cn
  [--class_weights W1 W2 ... Wn] [--scenes s1 s2 ... sm]
```

<details>
<summary> Script explanation </summary>
The script is a bit convoluted, so let's break it into parts:

- `y_dir` and `gt_dir`: two directories of .npz files in the same format as the dataset (`y_dir/1.npz`, `gt_dir/55.npz` etc.)
- `classes`: a list of classes in the order in which they appear in the prediction and GT files
- `class_weights` (optional, but used in the paper): how much to weigh each class. In the paper we compute these weights as the number of pixels across the whole dataset (train/val/semisup/test) for each of the 8 classes, resulting in the numbers below (a sketch of this computation follows this section).
- `scenes`: if `y_dir` and `gt_dir` contain multiple scenes that you want to evaluate separately, the script lets you pass the prefixes of the scenes. For example, `data/test_set_annotated_only/semantic_segprop8/` actually contains 3 scenes in its npz files, and in the paper we evaluate each scene independently. Even though the script outputs one csv file with predictions for each npz file, the scenes are used for proper aggregation at the scene level.
</details>
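
For reference, a minimal sketch of how such pixel-frequency weights can be derived, assuming argmax label maps stored under the `arr_0` key as in the other scripts of this commit:

```python
from pathlib import Path
import numpy as np

def compute_class_weights(label_dirs: list[Path], n_classes: int = 8) -> np.ndarray:
    counts = np.zeros(n_classes, dtype=np.int64)
    for label_dir in label_dirs:
        for npz_file in label_dir.glob("*.npz"):
            labels = np.load(npz_file)["arr_0"]  # (H, W) class ids
            counts += np.bincount(labels.ravel(), minlength=n_classes)[:n_classes]
    return counts / counts.sum()  # proportions; should roughly match the weights used below
```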

<details>
<summary> Reproducing paper results for Mask2Former </summary>

```
python scripts/evaluate_semantic_segmentation.py \
  data/test_set_annotated_only/semantic_mask2former_swin_mapillary_converted/ \
  data/test_set_annotated_only/semantic_segprop8/ \
  -o results.csv \
  --classes land forest residential road little-objects water sky hill \
  --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 \
  --scenes barsana_DJI_0500_0501_combined_sliced_2700_14700 comana_DJI_0881_full norway_210821_DJI_0015_full
```

Should output:
```
scene                                               iou     f1
barsana_DJI_0500_0501_combined_sliced_2700_14700    63.371  75.338
comana_DJI_0881_full                                60.559  73.779
norway_210821_DJI_0015_full                         37.986  45.939
mean                                                53.972  65.019
```

The `mean` row is the arithmetic mean of the three per-scene numbers. Not providing `--scenes` aggregates over all 3 scenes at once, rather than computing each metric per scene and then averaging:

```
        iou     f1
scene
all     60.456  73.261
```
</details>

### 3.1 Official benchmark

#### IoU

| method | #parameters | average | barsana_DJI_0500_0501_combined_sliced_2700_14700 | comana_DJI_0881_full | norway_210821_DJI_0015_full |
|:-|:-|:-|:-|:-|:-|
| [Mask2Former](https://openaccess.thecvf.com/content/CVPR2022/papers/Cheng_Masked-Attention_Mask_Transformer_for_Universal_Image_Segmentation_CVPR_2022_paper.pdf) | 216M | 53.97 | 63.37 | 60.55 | 37.98 |
| [NGC(LR)](https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Marcu_Self-Supervised_Hypergraphs_for_Learning_Multiple_World_Interpretations_ICCVW_2023_paper.pdf) | 32M | 40.75 | 46.51 | 45.59 | 30.17 |
| [CShift](https://www.bmvc2021-virtualconference.com/assets/papers/0455.pdf)[^1] | n/a | 39.67 | 46.27 | 43.67 | 29.09 |
| [NGC](https://cdn.aaai.org/ojs/16283/16283-13-19777-1-2-20210518.pdf)[^1] | 32M | 35.32 | 44.34 | 38.99 | 22.63 |
| [SafeUAV](https://openaccess.thecvf.com/content_ECCVW_2018/papers/11130/Marcu_SafeUAV_Learning_to_estimate_depth_and_safe_landing_areas_for_ECCVW_2018_paper.pdf)[^1] | 1.1M | 32.79 | n/a | n/a | n/a |

[^1]: reported in the [Dronescapes paper](https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Marcu_Self-Supervised_Hypergraphs_for_Learning_Multiple_World_Interpretations_ICCVW_2023_paper.pdf).

#### F1 Score

| method | #parameters | average | barsana_DJI_0500_0501_combined_sliced_2700_14700 | comana_DJI_0881_full | norway_210821_DJI_0015_full |
|:-|:-|:-|:-|:-|:-|
| [Mask2Former](https://openaccess.thecvf.com/content/CVPR2022/papers/Cheng_Masked-Attention_Mask_Transformer_for_Universal_Image_Segmentation_CVPR_2022_paper.pdf) | 216M | 65.01 | 75.33 | 73.77 | 45.93 |
data/a.npz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b65f01f193fd17ef9e44cb29be0ca169bb84a0bccbbf79d1c599fddb0730377
size 1018932

raw_data/videos/atanasie_DJI_0652_full/atanasie_DJI_0652_full_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54d836641cca94f1394fd3f357b9c1cde1e6fb72e434fe762aeceda3ccd4d8b6
size 106241317

raw_data/videos/barsana_DJI_0500_0501_combined_sliced_2700_14700/barsana_DJI_0500_0501_combined_sliced_2700_14700_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6501a871529d35d9cab0007905bd761ee6402decc9e0096468e76c2e7ec503ff
size 81439908

raw_data/videos/comana_DJI_0881_full/comana_DJI_0881_full_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e755cc002e7a1f182c4f3c38c526252fa9175c95f3c8297073deba5676dc15e
size 39167644

raw_data/videos/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110/gradistei_DJI_0787_0788_0789_combined_sliced_3510_13110_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dbc4caa4626e655e30edd5c888a76e8f6a7551a64b6060b3e0331c2fcb093dfa
size 120861375

raw_data/videos/herculane_DJI_0021_full/herculane_DJI_0021_full_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebd3807c53c713ed41718a748d388861ed5ca6238bdeb76d54c8c348d5b12e73
size 84390268

raw_data/videos/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715/jupiter_DJI_0703_0704_0705_combined_sliced_10650_21715_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67b8e961bd6c160af04a41103df047c64e7e6bda12f9df628abccf9455a08a62
size 96971823

raw_data/videos/norway_210821_DJI_0015_full/norway_210821_DJI_0015_full_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a42344d37f9cc3f685a7183cf80d245d7c920f8373fc41827b82a4089b10540
size 12033190

raw_data/videos/olanesti_DJI_0416_full/olanesti_DJI_0416_full_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1974c88b022f4ba9d0f1a063a017fccbad7edd64f20755a09c6d5d3d9cbe0662
size 114870840

raw_data/videos/petrova_DJI_0525_0526_combined_sliced_2850_11850/petrova_DJI_0525_0526_combined_sliced_2850_11850_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf2a9a9fd11c3322b141cdbcd78400f8a92ee0054fd6cbc6e2e16497de1cee25
size 52343989

raw_data/videos/slanic_DJI_0956_0957_combined_sliced_780_9780/slanic_DJI_0956_0957_combined_sliced_780_9780_540p.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:de4ccbc95da3c20c277d06592a687e761b83a9ef698d4ec78fe6ca2af8f9749b
size 79978585
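
All of the binary files above are stored as Git LFS pointers with the three-line `version`/`oid`/`size` format shown; after `git lfs pull` they are replaced by the actual binaries. A minimal sketch for inspecting such a pointer:

```python
from pathlib import Path

def parse_lfs_pointer(path: Path) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = dict(line.split(" ", 1) for line in path.read_text().splitlines() if line.strip())
    return {"version": fields["version"], "oid": fields["oid"], "size": int(fields["size"])}

# e.g. parse_lfs_pointer(Path("data/a.npz"))["size"] -> 1018932 (when not yet pulled)
```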
scripts/cfg.yaml
ADDED
@@ -0,0 +1,107 @@
vre:
  start_frame: ${oc.env:VRE_START_IX}
  end_frame: ${oc.env:VRE_END_IX,null}
  export_npy: True
  export_png: False
  exception_mode: skip_representation

representations:
  rgb:
    type: default/rgb
    dependencies: []
    parameters: {}

  hsv:
    type: default/hsv
    dependencies: []
    parameters: {}

  halftone1:
    type: soft-segmentation/python-halftone
    dependencies: []
    parameters:
      sample: 3
      scale: 1
      percentage: 91
      angles: [0, 15, 30, 45]
      antialias: False
      resolution: [240, 426]

  edges_canny:
    type: edges/canny
    dependencies: []
    parameters:
      threshold1: 100
      threshold2: 200
      aperture_size: 3
      l2_gradient: True

  softseg_gb:
    type: soft-segmentation/generalized_boundaries
    dependencies: []
    parameters:
      use_median_filtering: True
      adjust_to_rgb: True
      max_channels: 3

  edges_dexined:
    type: edges/dexined
    dependencies: []
    parameters: {}
    batch_size: 15
    device: ${oc.env:VRE_DEVICE,cpu}

  fastsam(s):
    type: semantic-segmentation/fastsam
    dependencies: []
    parameters:
      variant: fastsam-s
      iou: 0.9
      conf: 0.4
    device: ${oc.env:VRE_DEVICE,cpu}

  depth_dpt:
    type: depth/dpt
    dependencies: []
    parameters: {}
    batch_size: 10
    device: ${oc.env:VRE_DEVICE,cpu}

  # only works if fps is also set (for images) via --frame_rate in cli. For videos, it works just fine.
  opticalflow_rife:
    type: optical-flow/rife
    dependencies: []
    batch_size: 15
    parameters:
      uhd: False
      compute_backward_flow: False
    device: ${oc.env:VRE_DEVICE,cpu}

  semantic_mask2former_coco_47429163_0:
    type: semantic-segmentation/mask2former
    dependencies: []
    parameters:
      model_id: "47429163_0"
      semantic_argmax_only: True
    batch_size: 1
    device: ${oc.env:VRE_DEVICE,cpu}

  semantic_mask2former_mapillary_49189528_0:
    type: semantic-segmentation/mask2former
    dependencies: []
    parameters:
      model_id: "49189528_0"
      semantic_argmax_only: True
    batch_size: 1
    device: ${oc.env:VRE_DEVICE,cpu}

  depth_marigold:
    type: depth/marigold
    dependencies: []
    parameters:
      variant: marigold-lcm-v1-0
      denoising_steps: 4
      ensemble_size: 1
      processing_resolution: 768
    batch_size: 15
    device: ${oc.env:VRE_DEVICE,cpu}
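
The `${oc.env:VAR,default}` entries above look like OmegaConf-style environment-variable resolvers (an assumption about VRE's config loader); a minimal sketch of how such values resolve:

```python
import os
from omegaconf import OmegaConf

os.environ["VRE_DEVICE"] = "cuda"
cfg = OmegaConf.create({"device": "${oc.env:VRE_DEVICE,cpu}"})
print(cfg.device)  # "cuda"; would fall back to "cpu" if VRE_DEVICE were unset
```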
scripts/collage_slanic_DJI_0956_0957_combined_sliced_780_9780_3522.npz.png
ADDED
(binary image, stored via Git LFS)
scripts/convert_m2f_to_dronescapes.py
ADDED
@@ -0,0 +1,78 @@
"""script that converts predictions (.npz) of mask2former (mapillary or coco_panoptic) to dronescapes labels"""
from argparse import ArgumentParser, Namespace
from pathlib import Path
import shutil
from functools import partial
import numpy as np
from tqdm import tqdm
from loggez import loggez_logger as logger

COCO_MAPPING = {
    "land": ["grass-merged", "dirt-merged", "sand", "gravel", "flower", "playingfield", "snow", "platform"],
    "forest": ["tree-merged"],
    "residential": ["building-other-merged", "house", "roof", "fence-merged", "wall-other-merged", "wall-brick", "rock-merged", "tent", "bridge", "bench", "window-other", "fire hydrant", "traffic light", "umbrella", "wall-stone", "clock", "chair", "sports ball", "floor-other-merged", "floor-wood", "stop sign", "door-stuff", "banner", "light", "net", "surfboard", "frisbee", "rug-merged", "potted plant", "parking meter"],
    "road": ["road", "railroad", "pavement-merged", "stairs"],
    "little-objects": ["truck", "car", "boat", "horse", "person", "train", "elephant", "bus", "bird", "sheep", "cow", "motorcycle", "dog", "bicycle", "airplane", "kite"],
    "water": ["river", "water-other", "sea"],
    "sky": ["sky-other-merged"],
    "hill": ["mountain-merged"]
}

COCO_CLASSES = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush", "banner", "blanket", "bridge", "cardboard", "counter", "curtain", "door-stuff", "floor-wood", "flower", "fruit", "gravel", "house", "light", "mirror-stuff", "net", "pillow", "platform", "playingfield", "railroad", "river", "road", "roof", "sand", "sea", "shelf", "snow", "stairs", "tent", "towel", "wall-brick", "wall-stone", "wall-tile", "wall-wood", "water-other", "window-blind", "window-other", "tree-merged", "fence-merged", "ceiling-merged", "sky-other-merged", "cabinet-merged", "table-merged", "floor-other-merged", "pavement-merged", "mountain-merged", "grass-merged", "dirt-merged", "paper-merged", "food-other-merged", "building-other-merged", "rock-merged", "wall-other-merged", "rug-merged"]

MAPILLARY_MAPPING = {
    "land": ["Terrain", "Sand", "Snow"],
    "forest": ["Vegetation"],
    "residential": ["Building", "Utility Pole", "Pole", "Fence", "Wall", "Manhole", "Street Light", "Curb", "Guard Rail", "Caravan", "Junction Box", "Traffic Sign (Front)", "Billboard", "Banner", "Mailbox", "Traffic Sign (Back)", "Bench", "Fire Hydrant", "Trash Can", "CCTV Camera", "Traffic Light", "Barrier", "Rail Track", "Phone Booth", "Curb Cut", "Traffic Sign Frame", "Bike Rack"],
    "road": ["Road", "Lane Marking - General", "Sidewalk", "Bridge", "Other Vehicle", "Motorcyclist", "Pothole", "Catch Basin", "Car Mount", "Tunnel", "Parking", "Service Lane", "Lane Marking - Crosswalk", "Pedestrian Area", "On Rails", "Bike Lane", "Crosswalk - Plain"],
    "little-objects": ["Car", "Person", "Truck", "Boat", "Wheeled Slow", "Trailer", "Ground Animal", "Bicycle", "Motorcycle", "Bird", "Bus", "Ego Vehicle", "Bicyclist", "Other Rider"],
    "water": ["Water"],
    "sky": ["Sky"],
    "hill": ["Mountain"]
}

MAPILLARY_CLASSES = ["Bird", "Ground Animal", "Curb", "Fence", "Guard Rail", "Barrier", "Wall", "Bike Lane", "Crosswalk - Plain", "Curb Cut", "Parking", "Pedestrian Area", "Rail Track", "Road", "Service Lane", "Sidewalk", "Bridge", "Building", "Tunnel", "Person", "Bicyclist", "Motorcyclist", "Other Rider", "Lane Marking - Crosswalk", "Lane Marking - General", "Mountain", "Sand", "Sky", "Snow", "Terrain", "Vegetation", "Water", "Banner", "Bench", "Bike Rack", "Billboard", "Catch Basin", "CCTV Camera", "Fire Hydrant", "Junction Box", "Mailbox", "Manhole", "Phone Booth", "Pothole", "Street Light", "Pole", "Traffic Sign Frame", "Utility Pole", "Traffic Light", "Traffic Sign (Back)", "Traffic Sign (Front)", "Trash Can", "Bicycle", "Boat", "Bus", "Car", "Caravan", "Motorcycle", "On Rails", "Other Vehicle", "Trailer", "Truck", "Wheeled Slow", "Car Mount", "Ego Vehicle"]

def get_args() -> Namespace:
    parser = ArgumentParser()
    parser.add_argument("input_path", type=lambda p: Path(p).absolute())
    parser.add_argument("output_path", type=lambda p: Path(p).absolute())
    parser.add_argument("mapping_type", choices=["coco", "mapillary"])
    parser.add_argument("--overwrite", action="store_true")
    args = parser.parse_args()
    assert not args.output_path.exists() or args.overwrite, f"{args.output_path} exists. Use --overwrite"
    if args.output_path.exists():
        shutil.rmtree(args.output_path)
    return args

def do_one(in_out_path: tuple[Path, Path], mapping_type: str):
    in_path, out_path = in_out_path
    data = np.load(in_path, allow_pickle=False)
    data = data if isinstance(data, np.ndarray) else data["arr_0"]  # in case of npz, we need this as well

    classes = MAPILLARY_CLASSES if mapping_type == "mapillary" else COCO_CLASSES
    mapping = MAPILLARY_MAPPING if mapping_type == "mapillary" else COCO_MAPPING
    mapping_ix = {list(mapping.keys()).index(k): [classes.index(_v) for _v in v] for k, v in mapping.items()}
    mapping_to_dronescapes = {}
    for k, v in mapping_ix.items():
        for _v in v:
            mapping_to_dronescapes[_v] = k
    mapped_data = np.vectorize(mapping_to_dronescapes.get)(data).astype(np.uint8)
    np.savez(out_path, mapped_data)
    return mapped_data

def main(args: Namespace):
    in_files = [x for x in args.input_path.iterdir() if x.suffix == ".npz"]
    out_files = [args.output_path / x.name for x in in_files]
    args.output_path.mkdir(exist_ok=False, parents=True)
    assert len(in_files) > 0, "No .npz files found"
    logger.info(f"In dir: '{args.input_path}'")
    logger.info(f"Out dir: '{args.output_path}'")
    logger.info(f"Found {len(in_files)} to convert. Dataset type: '{args.mapping_type}'")

    items = list(zip(in_files, out_files))
    for item in tqdm(items):
        do_one(item, mapping_type=args.mapping_type)

if __name__ == "__main__":
    main(get_args())
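
A side note on `do_one` above: `np.vectorize(mapping_to_dronescapes.get)` runs a Python call per pixel and yields `None` for any source class absent from the mapping (several COCO classes, e.g. "cat" or "bear", are not mapped), which would then fail the `astype(np.uint8)` cast. A hypothetical lookup-table variant that is faster and makes the unmapped case explicit:

```python
import numpy as np

def remap_with_lut(data: np.ndarray, mapping_to_dronescapes: dict[int, int], n_src_classes: int) -> np.ndarray:
    """Vectorized remapping via a lookup table; unmapped source classes become 255."""
    lut = np.full(n_src_classes, 255, dtype=np.uint8)
    for src_ix, dst_ix in mapping_to_dronescapes.items():
        lut[src_ix] = dst_ix
    return lut[data]
```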
scripts/count_npz.sh
ADDED
@@ -0,0 +1,18 @@
# Usage: bash count_npz.sh npz_540p
(
echo "scene,repr,counts"
ls $1 | while read line; do
  scene_name=$(echo $line | cut -d "_" -f1);
  ls $1/$line | while read line2; do
    n_files=$(find $1/$line/"$line2" -name "*.npz" | wc -l);
    echo "$scene_name","$line2","$n_files";
  done;
done ) | python -c '''
import sys, pandas as pd;
df = pd.read_csv(sys.stdin);
df2 = df.groupby("scene").apply(lambda x: dict(zip(x["repr"], x["counts"])), include_groups=False).reset_index();
df3 = pd.json_normalize(df2[0]).set_index(df2["scene"]).fillna(0).astype(int);
df4 = df3.reindex(columns=["rgb", *sorted(x for x in df3.columns if x != "rgb")])
df4.columns = [f"{x[0:18]}.." if len(x) > 20 else x for x in df4.columns]
print(df4.to_markdown())
'''
scripts/dronescapes_viewer.ipynb
ADDED
(the diff for this file is too large to render)
scripts/dronescapes_viewer.py
ADDED
@@ -0,0 +1,41 @@
#!/usr/bin/env python3
import sys
from pathlib import Path
sys.path.append(Path(__file__).parents[1].__str__())
from dronescapes_reader import MultiTaskDataset
from dronescapes_reader.dronescapes_representations import dronescapes_task_types
from pprint import pprint
from torch.utils.data import DataLoader
import random
import numpy as np

def main():
    assert len(sys.argv) == 2, "Usage: ./dronescapes_viewer.py /path/to/dataset"
    reader = MultiTaskDataset(sys.argv[1], task_names=list(dronescapes_task_types.keys()),
                              task_types=dronescapes_task_types, handle_missing_data="fill_nan",
                              normalization="min_max", cache_task_stats=True)
    print(reader)

    print("== Shapes ==")
    pprint(reader.data_shape)

    print("== Random loaded item ==")
    rand_ix = random.randint(0, len(reader) - 1)
    data, name, repr_names = reader[rand_ix]  # get a random item
    pprint({k: v for k, v in data.items()})

    img_data = {}
    for k, v in data.items():
        img_data[k] = reader.name_to_task[k].plot_fn(v) if v is not None else np.zeros((*reader.data_shape[k][0:2], 3))

    print("== Random loaded batch ==")
    batch_data, name, repr_names = reader[rand_ix: min(len(reader), rand_ix + 5)]  # get a random batch
    pprint({k: v for k, v in batch_data.items()})  # Nones are converted to 0s automagically

    print("== Random loaded batch using torch DataLoader ==")
    loader = DataLoader(reader, collate_fn=reader.collate_fn, batch_size=5, shuffle=True)
    batch_data, name, repr_names = next(iter(loader))
    pprint({k: v for k, v in batch_data.items()})  # Nones are converted to 0s automagically

if __name__ == "__main__":
    main()
scripts/eval_script_old.py
ADDED
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
The old evaluation script.
|
3 |
+
To run, you first need to split the test scenes data into 3 different directories:
|
4 |
+
|
5 |
+
cd /dronescapes/data
|
6 |
+
scenes=(comana barsana norway);
|
7 |
+
for scene in ${scenes[@]} ; do
|
8 |
+
ls test_set_annotated_only | while read task; do
|
9 |
+
mkdir -p test_set_annotated_only_per_scene/$scene/$task;
|
10 |
+
ls test_set_annotated_only/$task | grep "$scene" | while read line; do
|
11 |
+
cp test_set_annotated_only/$task/$line test_set_annotated_only_per_scene/$scene/$task/$line;
|
12 |
+
done;
|
13 |
+
done
|
14 |
+
done
|
15 |
+
|
16 |
+
Then run this:
|
17 |
+
cd /dronescapes/scripts
|
18 |
+
python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/comana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/comana/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/comana --overwrite
|
19 |
+
python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/barsana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/barsana/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/barsana --overwrite
|
20 |
+
python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/norway/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/norway/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/norway --overwrite
|
21 |
+
"""
|
22 |
+
|
23 |
+
from __future__ import annotations
|
24 |
+
import os
|
25 |
+
import numpy as np
|
26 |
+
import pandas as pd
|
27 |
+
from natsort import natsorted
|
28 |
+
from pathlib import Path
|
29 |
+
import shutil
|
30 |
+
import tempfile
|
31 |
+
from tqdm import tqdm
|
32 |
+
|
33 |
+
import argparse
|
34 |
+
import warnings
|
35 |
+
warnings.filterwarnings("ignore")
|
36 |
+
|
37 |
+
def convert_label2multi(label, class_id):
|
38 |
+
out = np.zeros((label.shape[0], label.shape[1]), dtype=np.uint8)
|
39 |
+
data_indices = np.where(np.equal(label, class_id))
|
40 |
+
out[data_indices[0], data_indices[1]] = 1
|
41 |
+
return np.array(out, dtype=bool)
|
42 |
+
|
43 |
+
def process_all_video_frames(gt_files: list[Path], pred_files: list[Path], class_id: int):
|
44 |
+
TP, TN, FP, FN = {}, {}, {}, {}
|
45 |
+
for gt_file, pred_file in tqdm(zip(gt_files, pred_files), total=len(gt_files), desc=f"{class_id=}"):
|
46 |
+
gt_label_raw = np.load(gt_file, allow_pickle=True)["arr_0"]
|
47 |
+
net_label_raw = np.load(pred_file, allow_pickle=True)["arr_0"]
|
48 |
+
gt_label = convert_label2multi(gt_label_raw, class_id)
|
49 |
+
net_label = convert_label2multi(net_label_raw, class_id)
|
50 |
+
|
51 |
+
true_positives = np.count_nonzero(gt_label * net_label)
|
52 |
+
true_negatives = np.count_nonzero((gt_label + net_label) == 0)
|
53 |
+
false_positives = np.count_nonzero((np.array(net_label, dtype=int) - np.array(gt_label, dtype=int)) > 0)
|
54 |
+
false_negatives = np.count_nonzero((np.array(gt_label, dtype=int) - np.array(net_label, dtype=int)) > 0)
|
55 |
+
|
56 |
+
TP[gt_file.name] = true_positives
|
57 |
+
TN[gt_file.name] = true_negatives
|
58 |
+
FP[gt_file.name] = false_positives
|
59 |
+
FN[gt_file.name] = false_negatives
|
60 |
+
df = pd.DataFrame([TP, FP, TN, FN], index=["tp", "fp", "tn", "fn"]).T
|
61 |
+
global_TP, global_TN, global_FP, global_FN = sum(TP.values()), sum(TN.values()), sum(FP.values()), sum(FN.values())
|
62 |
+
global_precision = global_TP / (global_TP + global_FP + np.spacing(1))
|
63 |
+
global_recall = global_TP / (global_TP + global_FN + np.spacing(1))
|
64 |
+
global_f1_score = (2 * global_precision * global_recall) / (global_precision + global_recall + np.spacing(1))
|
65 |
+
global_iou = global_TP / (global_TP + global_FP + global_FN + np.spacing(1))
|
66 |
+
|
67 |
+
return (global_precision, global_recall, global_f1_score, global_iou)
|
68 |
+
|
69 |
+
def join_results(args: argparse.Namespace):
|
70 |
+
out_path = os.path.join(args.out_dir, 'joined_results_' + str(len(args.classes)) + 'classes.txt')
|
71 |
+
out_file = open(out_path, 'w')
|
72 |
+
|
73 |
+
joined_f1_scores_mean = []
|
74 |
+
joined_iou_scores_mean = []
|
75 |
+
|
76 |
+
for CLASS_ID in range(len(args.classes)):
|
77 |
+
RESULT_FILE = os.path.join(args.out_dir, 'evaluation_dronescapes_CLASS_' + str(CLASS_ID) + '.txt')
|
78 |
+
result_file_lines = open(RESULT_FILE, 'r').read().splitlines()
|
79 |
+
for idx, line in enumerate(result_file_lines):
|
80 |
+
if idx != 0:
|
81 |
+
splits = line.split(',')
|
82 |
+
f1_score = float(splits[2])
|
83 |
+
iou_score = float(splits[3])
|
84 |
+
|
85 |
+
out_file.write('------------------------- ' + ' CLASS ' + str(CLASS_ID) + ' - ' + args.classes[CLASS_ID] + ' --------------------------------------------\n')
|
86 |
+
# F1Score
|
87 |
+
out_file.write('F1-Score: ' + str(round(f1_score, 4)) + '\n')
|
88 |
+
# Mean IOU
|
89 |
+
out_file.write('IOU: ' + str(round(iou_score, 4)) + '\n')
|
90 |
+
out_file.write('\n\n')
|
91 |
+
joined_f1_scores_mean.append(f1_score)
|
92 |
+
joined_iou_scores_mean.append(iou_score)
|
93 |
+
|
94 |
+
out_file.write('\n\n')
|
95 |
+
out_file.write('Mean F1-Score all classes: ' + str(round(np.mean(joined_f1_scores_mean), 4)) + '\n')
|
96 |
+
out_file.write('Mean IOU all classes: ' + str(round(np.mean(joined_iou_scores_mean), 4)) + '\n')
|
97 |
+
out_file.write('\n\n')
|
98 |
+
|
99 |
+
out_file.write('\n\n')
|
100 |
+
    out_file.write('Weighted Mean F1-Score all classes: ' + str(round(np.sum(np.dot(joined_f1_scores_mean, args.class_weights)), 4)) + '\n')
    out_file.write('Weighted Mean IOU all classes: ' + str(round(np.sum(np.dot(joined_iou_scores_mean, args.class_weights)), 4)) + '\n')
    out_file.write('\n\n')

    out_file.close()
    print(f"Written to '{out_path}'")

def compat_old_txt_file(args: Namespace):
    (tempdir := Path(tempfile.TemporaryDirectory().name)).mkdir()
    (tempdir / "gt").mkdir()
    (tempdir / "pred").mkdir()
    print(f"Old pattern detected. Symlinking files to a temp dir: {tempdir}")
    test_files = natsorted(open(args.txt_path, "r").read().splitlines())
    scenes = natsorted(set(os.path.dirname(x) for x in test_files))
    assert len(scenes) == 1, scenes
    files = natsorted([x for x in test_files if scenes[0] in x])
    gt_files = [f"{args.gt_path}/{f.split('/')[0]}/segprop{len(args.classes)}/{f.split('/')[1]}.npz" for f in files]
    pred_files = [f"{args.pred_path}/{f.split('/')[0]}/{int(f.split('/')[1]):06}.npz" for f in files]
    assert all(Path(x).exists() for x in [*gt_files, *pred_files])
    for _file in gt_files:
        os.symlink(_file, tempdir / "gt" / Path(_file).name)
    for _file in pred_files:
        os.symlink(_file, tempdir / "pred" / Path(_file).name)
    args.gt_path = tempdir / "gt"
    args.pred_path = tempdir / "pred"
    args.txt_path = None

def main(args: argparse.Namespace):
    gt_files = natsorted([x for x in args.gt_path.iterdir()], key=lambda x: Path(x).name)
    pred_files = natsorted([x for x in args.pred_path.iterdir()], key=lambda x: Path(x).name)
    assert all(Path(x).exists() for x in [*gt_files, *pred_files])
    global_precision, global_recall, global_f1, global_iou = process_all_video_frames(gt_files, pred_files, args.class_id)

    out_path = os.path.join(args.out_dir, 'evaluation_dronescapes_CLASS_' + str(args.class_id) + '.txt')
    out_file = open(out_path, 'w')
    out_file.write('precision,recall,f1,iou\n')
    out_file.write('{0:.6f},{1:.6f},{2:.6f},{3:.6f}\n'.format(global_precision, global_recall, global_f1, global_iou))
    out_file.close()
    print(f"Written to '{out_path}'")

if __name__ == "__main__":
    """
    Example splits and paths:
    Barsana: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20220517_train_on_even_semisup_on_odd_validate_on_last_odd_triplet_journal_split/only_manually_annotated_test_files_36.txt
    Norce: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20220810_new_norce_clip/only_manually_annotated_test_files_50.txt
    Comana: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20221208_new_comana_clip/only_manually_annotated_test_files_30.txt
    gt_path: /Date3/hpc/datasets/dronescapes/all_scenes
    pred_path: /Date3/hpc/code/Mask2Former/demo_dronescapes/outputs_dronescapes_compatible/mapillary_sseg

    Supported class configurations:
    NC = 7
    CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky']
    CLASS_WEIGHTS = [0.28172092, 0.37426183, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721]

    NC = 8
    CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky', 'hill']
    CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721, 0.06836531]

    NC = 10
    CLASS_NAMES = ['land', 'forest', 'low-level', 'road', 'high-level', 'cars', 'water', 'sky', 'hill', 'person']
    CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.09954808, 0.05937348, 0.03386891, 0.00445865, 0.05987466, 0.08660721, 0.06836531, 0.00028626]
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--gt_path", type=Path, required=True)
    parser.add_argument("--pred_path", type=Path, required=True)
    parser.add_argument("--out_dir", "-o", required=True, type=Path, default=Path(__file__).parent / "out_dir")
    parser.add_argument("--classes", nargs="+")
    parser.add_argument("--class_weights", type=float, nargs="+", required=True)
    parser.add_argument("--txt_path")
    parser.add_argument("--overwrite", action="store_true")
    args = parser.parse_args()
    if args.classes is None:
        print("Class names not provided, defaulting to numeric class ids.")
        args.classes = list(map(str, range(len(args.class_weights))))
    assert len(args.classes) == len(args.class_weights), (args.classes, args.class_weights)
    assert len(args.classes) in (7, 8, 10), len(args.classes)
    assert not args.out_dir.exists() or args.overwrite, f"'{args.out_dir}' exists. Use --overwrite"
    shutil.rmtree(args.out_dir, ignore_errors=True)
    os.makedirs(args.out_dir, exist_ok=True)

    if args.txt_path is not None:
        compat_old_txt_file(args)

    for class_id in range(len(args.classes)):
        args.class_id = class_id
        main(args)
    join_results(args)
scripts/evaluate_semantic_segmentation.py
ADDED
@@ -0,0 +1,130 @@
#!/usr/bin/env python3
"""
Evaluation script for semantic segmentation for dronescapes. Outputs F1Score and mIoU for the classes and each frame.
Usage: ./evaluate_semantic_segmentation.py y_dir gt_dir --classes C1 .. Cn [--class_weights W1 .. Wn] -o results.csv
"""
import sys
import os
from loggez import loggez_logger as logger
from pathlib import Path
from argparse import ArgumentParser, Namespace
from tempfile import TemporaryDirectory
from multiprocessing import Pool
from functools import partial
from torchmetrics.functional.classification import multiclass_stat_scores
from tqdm import tqdm
import torch as tr
import numpy as np
import pandas as pd

sys.path.append(Path(__file__).parents[1].__str__())
from dronescapes_reader import MultiTaskDataset
from dronescapes_reader.dronescapes_representations import SemanticRepresentation

def compute_metrics(tp: np.ndarray, fp: np.ndarray, tn: np.ndarray, fn: np.ndarray) -> pd.DataFrame:
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    iou = tp / (tp + fp + fn)
    return pd.DataFrame([precision, recall, f1, iou], index=["precision", "recall", "f1", "iou"]).T

def compute_metrics_by_class(df: pd.DataFrame, class_name: str) -> pd.DataFrame:
    df = df.query("class_name == @class_name").drop(columns="class_name")
    df.loc["all"] = df.sum()
    df[["precision", "recall", "f1", "iou"]] = compute_metrics(df["tp"], df["fp"], df["tn"], df["fn"])
    df.insert(0, "class_name", class_name)
    df = df.fillna(0).round(3)
    return df

def _do_one(i: int, reader: MultiTaskDataset, num_classes: int) -> tuple[tr.Tensor, str]:
    data, name = reader[i][0:2]
    y = data["pred"].argmax(-1) if data["pred"].dtype != tr.int64 else data["pred"]
    gt = data["gt"].argmax(-1) if data["gt"].dtype != tr.int64 else data["gt"]
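    # multiclass_stat_scores with average=None returns a (num_classes, 5) tensor of
    # [tp, fp, tn, fn, support] per class; the [:, 0:4] slice below keeps only the
    # four confusion counts that compute_metrics consumes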
    return multiclass_stat_scores(y, gt, num_classes=num_classes, average=None)[:, 0:4], name

def compute_raw_stats_per_frame(reader: MultiTaskDataset, classes: list[str], n_workers: int = 1) -> pd.DataFrame:
    map_fn = map if n_workers == 1 else Pool(n_workers).imap
    do_one_fn = partial(_do_one, reader=reader, num_classes=len(classes))
    map_res = list(tqdm(map_fn(do_one_fn, range(len(reader))), total=len(reader)))
    res, index = tr.stack([x[0] for x in map_res]).reshape(len(reader) * len(classes), 4), [x[1] for x in map_res]

    df = pd.DataFrame(res, index=np.repeat(index, len(classes)), columns=["tp", "fp", "tn", "fn"])
    df.insert(0, "class_name", np.array(classes)[:, None].repeat(len(index), 1).T.flatten())
    return df

def compute_final_per_scene(res: pd.DataFrame, scene: str, classes: list[str],
                            class_weights: list[float]) -> tuple[str, float, float]:
    df = res.iloc[[x.startswith(scene) for x in res.index]]
    # aggregate all the individual per-frame predictions of this scene, class by class
    df_scene = df[["class_name", "tp", "fp", "tn", "fn"]].groupby("class_name") \
        .apply(lambda x: x.sum(), include_groups=False).loc[classes]
    df_metrics = compute_metrics(df_scene["tp"], df_scene["fp"], df_scene["tn"], df_scene["fn"])
    iou_weighted = (df_metrics["iou"] * class_weights).sum()
    f1_weighted = (df_metrics["f1"] * class_weights).sum()
    return scene, iou_weighted, f1_weighted

def _check_and_symlink_dirs(y_dir: Path, gt_dir: Path) -> Path:
    """checks that the two provided paths are directories containing only .npz files and links them together in a tmp dir"""
    assert (l := {x.name for x in y_dir.iterdir()}) == (r := {x.name for x in gt_dir.iterdir()}), f"{l} \n vs \n {r}"
    assert all(x.endswith(".npz") for x in [*l, *r]), f"Not dirs of only .npz files: {l} \n {r}"
    (temp_dir := Path(TemporaryDirectory().name)).mkdir(exist_ok=False)
    os.symlink(y_dir, temp_dir / "pred")
    os.symlink(gt_dir, temp_dir / "gt")
    return temp_dir

def get_args() -> Namespace:
    parser = ArgumentParser()
    parser.add_argument("y_dir", type=lambda p: Path(p).absolute())
    parser.add_argument("gt_dir", type=lambda p: Path(p).absolute())
    parser.add_argument("--output_path", "-o", type=Path, required=True)
    parser.add_argument("--classes", required=True, nargs="+")
    parser.add_argument("--class_weights", nargs="+", type=float)
    parser.add_argument("--scenes", nargs="+", default=["all"], help="each scene will get separate metrics if provided")
    parser.add_argument("--overwrite", action="store_true")
    parser.add_argument("--n_workers", type=int, default=1)
    args = parser.parse_args()
    if args.class_weights is None:
        logger.info("No class weights provided, defaulting to equal weights.")
        args.class_weights = [1 / len(args.classes)] * len(args.classes)
    assert (a := len(args.class_weights)) == (b := len(args.classes)), (a, b)
    assert np.fabs(sum(args.class_weights) - 1) < 1e-3, (args.class_weights, sum(args.class_weights))
    assert args.output_path.suffix == ".csv", f"Output file must end in .csv, got: '{args.output_path.suffix}'"
    if len(args.scenes) > 0:
        logger.info(f"Scenes: {args.scenes}")
    if args.output_path.exists() and args.overwrite:
        os.remove(args.output_path)
    assert args.n_workers >= 1 and isinstance(args.n_workers, int), args.n_workers
    return args

def main(args: Namespace):
    # setup: put both directories under the same parent directory for the reader to work
    temp_dir = _check_and_symlink_dirs(args.y_dir, args.gt_dir)
    pred_repr = SemanticRepresentation("pred", classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
    gt_repr = SemanticRepresentation("gt", classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
    reader = MultiTaskDataset(temp_dir, task_names=["pred", "gt"], task_types={"pred": pred_repr, "gt": gt_repr},
                              handle_missing_data="drop", normalization=None)
    assert (a := len(reader.files_per_repr["gt"])) == (b := len(reader.files_per_repr["pred"])), f"{a} vs {b}"

    # Compute TP, FP, TN, FN for each frame
    raw_stats = compute_raw_stats_per_frame(reader, args.classes, args.n_workers)
    Path(args.output_path).parent.mkdir(exist_ok=True, parents=True)
    raw_stats.to_csv(args.output_path)
    logger.info(f"Stored raw metrics file to: '{args.output_path}'")

    # Compute Precision, Recall, F1, IoU for each class and put them together in the same df.
    metrics_per_class = pd.concat([compute_metrics_by_class(raw_stats, class_name) for class_name in args.classes])

    # Aggregate the class-level metrics into the final metrics based on the class weights (computed globally from the stats)
    final_agg = []
    for scene in args.scenes:  # if we have >1 scene in the test set, aggregate the results for each of them separately
        final_agg.append(compute_final_per_scene(metrics_per_class, scene, args.classes, args.class_weights))
    final_agg = pd.DataFrame(final_agg, columns=["scene", "iou", "f1"]).set_index("scene")
    if len(args.scenes) > 1:
        final_agg.loc["mean"] = final_agg.mean()
    final_agg = (final_agg * 100).round(3)
    print(final_agg)

if __name__ == "__main__":
    main(get_args())
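To make the per-class formulas in compute_metrics concrete, here is a minimal, self-contained sketch with toy confusion counts (illustrative numbers, not dataset results):

import pandas as pd

# toy counts for a single class: 90 true positives, 10 false positives,
# 880 true negatives, 20 false negatives
tp, fp, tn, fn = pd.Series([90.0]), pd.Series([10.0]), pd.Series([880.0]), pd.Series([20.0])
precision = tp / (tp + fp)                          # 90 / 100 = 0.900
recall = tp / (tp + fn)                             # 90 / 110 ~= 0.818
f1 = 2 * precision * recall / (precision + recall)  # ~= 0.857
iou = tp / (tp + fp + fn)                           # 90 / 120 = 0.750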
scripts/semantic_mapper.ipynb
ADDED
The diff for this file is too large to render.
scripts/symlinks_from_txt_list.py
ADDED
@@ -0,0 +1,124 @@
#!/usr/bin/env python3
import shutil
import os
from argparse import ArgumentParser, Namespace
from pathlib import Path
from loggez import loggez_logger as logger
from tqdm import tqdm

def _check_and_find(dataset_dir: Path, output_path: str, scene: str, _repr: str,
                    stem: str, suffix: str) -> tuple[Path, Path] | None:
    """handles all the variations in how the data is stored. VRE uses npy/x.npy, but other tools use different formats too."""
    out_file = Path(f"{output_path}/{_repr}/{scene}_{stem}.{suffix}")  # e.g.: rgb/slanic_1.npz
    if (in_path := (dataset_dir / scene / _repr / f"{stem}.{suffix}")).exists():  # e.g.: slanic/rgb/1.npz
        return in_path, out_file
    if (in_path := (dataset_dir / scene / _repr / suffix / f"{stem}.{suffix}")).exists():  # e.g.: slanic/rgb/npz/1.npz
        return in_path, out_file

    try:
        int_stem = int(stem)
    except ValueError:
        return None

    if (in_path := (dataset_dir / scene / _repr / f"{int_stem:06d}.{suffix}")).exists():  # e.g.: slanic/rgb/000001.npz
        return in_path, out_file
    # e.g.: slanic/rgb/npz/000001.npz
    if (in_path := (dataset_dir / scene / _repr / suffix / f"{int_stem:06d}.{suffix}")).exists():
        return in_path, out_file
    return None

def check_and_gather_all_files(dataset_dir: Path, txt_data: list[tuple[str, str]],
                               output_path: Path, suffix: str) -> dict[str, str]:
    """returns a {in_dir/scene/repr/stem.suffix: out_dir/repr/scene/stem.suffix} dict based on dataset_dir"""
    assert suffix in ("npz", "npy"), suffix
    scene_reprs = {}
    symlinks_to_do = {}
    for scene, stem in tqdm(txt_data, desc="Gather data"):
        assert (dataset_dir / scene).exists(), f"Scene '{scene}' does not exist in '{dataset_dir}'"
        if scene not in scene_reprs:
            scene_reprs[scene] = [x.name for x in (dataset_dir / scene).iterdir() if x.is_dir()]
        n_found = 0
        for _repr in scene_reprs[scene]:
            if (res := _check_and_find(dataset_dir, output_path, scene, _repr, stem, suffix)) is not None:
                in_file, out_file = res
                n_found += 1
                symlinks_to_do[in_file] = out_file
        assert n_found > 0, f"Stem '{stem}' not found in any repr ({scene_reprs[scene]}) of scene '{scene}'"
    assert len(symlinks_to_do) > 0
    logger.info(f"Gathered {len(symlinks_to_do)} symlinks to create")
    return symlinks_to_do

def make_partitions_if_needed(symlinks_to_do: dict[str, str], partition_max_size: int) -> dict[str, str]:
    """updates out_dir/repr/0.npz to out_dir/repr/part0/0.npz if needed"""
    symlinks_by_repr = {}  # gather as {repr: {in_file: out_file}}
    for k, v in symlinks_to_do.items():
        _repr = v.parent.name
        if _repr not in symlinks_by_repr:
            symlinks_by_repr[_repr] = {}
        symlinks_by_repr[_repr][k] = v

    new_symlinks_to_do = {}
    for _repr, repr_files in symlinks_by_repr.items():
        if (count := len(repr_files)) <= partition_max_size:
            new_symlinks_to_do = {**new_symlinks_to_do, **repr_files}
        else:
            logger.info(f"Representation {_repr} has {count} items, which is more than {partition_max_size}. Partitioning.")
            n_parts = (count // partition_max_size) + (count % partition_max_size != 0)
            repr_files_as_tuple = tuple(repr_files.items())
            for i in range(n_parts):
                part = repr_files_as_tuple[i * partition_max_size: (i + 1) * partition_max_size]
                for in_file, out_file in part:  # add the partition subdir
                    new_symlinks_to_do[in_file] = out_file.parent / f"part{i}" / out_file.name
    assert (a := len(new_symlinks_to_do)) == (b := len(symlinks_to_do)), (a, b)
    return new_symlinks_to_do

def read_txt_data(txt_file: Path) -> list[tuple[str, str]]:
    """reads the data from the txt file with the format scene/stem on each line"""
    f = open(txt_file, "r")
    res = []
    for row in f.readlines():
        assert len(split_row := row.strip().split("/")) == 2, row
        res.append(split_row)
    logger.info(f"Read {len(res)} paths.")
    return res

def get_args() -> Namespace:
    """cli args"""
    parser = ArgumentParser()
    parser.add_argument("dataset_dir", type=lambda p: Path(p).absolute())
    parser.add_argument("--txt_file", type=Path)
    parser.add_argument("--output_path", "-o", type=lambda p: Path(p).absolute())
    parser.add_argument("--overwrite", action="store_true")
    parser.add_argument("--copy_files", action="store_true")
    parser.add_argument("--partition_max_size", type=int, default=10000)  # thanks huggingface for this
    args = parser.parse_args()
    if args.output_path.exists():
        if args.overwrite:
            logger.info(f"{args.output_path} exists and --overwrite set, deleting the directory first")
            shutil.rmtree(args.output_path)
        else:
            logger.info(f"{args.output_path} exists but --overwrite not set. Will skip all existing files")
    assert args.dataset_dir.exists() and args.dataset_dir.is_dir(), f"'{args.dataset_dir}' doesn't exist."
    assert args.txt_file.exists(), f"'{args.txt_file}' doesn't exist."
    return args

def main(args: Namespace):
    """main fn"""
    logger.info(f"\n- In dir: {args.dataset_dir}\n- Out dir: {args.output_path} \n- Symlinks: {not args.copy_files}")
    args.output_path.mkdir(exist_ok=not args.overwrite, parents=True)
    txt_data = read_txt_data(args.txt_file)
    symlinks_to_do = check_and_gather_all_files(args.dataset_dir, txt_data, args.output_path, suffix="npz")
    symlinks_to_do = make_partitions_if_needed(symlinks_to_do, args.partition_max_size)
    for in_file, out_file in tqdm(symlinks_to_do.items(), desc="copying" if args.copy_files else "symlinks"):
        Path(out_file).parent.mkdir(exist_ok=True, parents=True)
        if Path(out_file).exists():
            continue
        if args.copy_files:
            shutil.copyfile(in_file, out_file, follow_symlinks=True)
        else:
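            # build the symlink target relative to the output directory so the
            # output tree remains valid if the whole dataset root is moved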
            rel_path = f"{os.path.relpath(in_file.parent, out_file.parent)}/{in_file.name}"
            assert (pth := Path(f"{out_file.parent}/{rel_path}")).exists(), pth
            os.symlink(rel_path, out_file)

if __name__ == "__main__":
    main(get_args())
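As a quick sanity check of the ceiling-division arithmetic used by make_partitions_if_needed (values chosen purely for illustration):

count, partition_max_size = 25000, 10000
n_parts = (count // partition_max_size) + (count % partition_max_size != 0)
assert n_parts == 3  # part0 and part1 hold 10000 files each, part2 the remaining 5000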