Commit: Try updating via huggingface hub (使用huggingface hub尝试更新)
This view is limited to 50 files because the commit contains too many changes. Changed files:
- .env.example +6 -6
- .gitattributes +42 -35
- .gitignore +153 -154
- .pre-commit-config.yaml +147 -147
- .project-root +2 -2
- Makefile +30 -30
- README.md +104 -104
- app.py +142 -140
- configs/__init__.py +1 -1
- configs/callbacks/default.yaml +22 -22
- configs/callbacks/early_stopping.yaml +15 -15
- configs/callbacks/model_checkpoint.yaml +17 -17
- configs/callbacks/model_summary.yaml +5 -5
- configs/callbacks/rich_progress_bar.yaml +4 -4
- configs/data/CloudSEN12/README.md +52 -52
- configs/data/GF12-MS-WHU/README.md +72 -72
- configs/data/L8-Biome/README.md +56 -56
- configs/data/celeba.yaml +7 -7
- configs/data/hrcwhu/README.md +56 -56
- configs/data/hrcwhu/hrcwhu.yaml +88 -88
- configs/data/mnist.yaml +6 -6
- configs/debug/default.yaml +35 -35
- configs/debug/fdr.yaml +9 -9
- configs/debug/limit.yaml +12 -12
- configs/debug/overfit.yaml +13 -13
- configs/debug/profiler.yaml +12 -12
- configs/eval.yaml +18 -18
- configs/experiment/hrcwhu_cdnetv1.yaml +46 -46
- configs/experiment/hrcwhu_cdnetv2.yaml +46 -46
- configs/experiment/hrcwhu_dbnet.yaml +47 -47
- configs/experiment/hrcwhu_hrcloudnet.yaml +47 -0
- configs/experiment/hrcwhu_mcdnet.yaml +46 -46
- configs/experiment/hrcwhu_scnn.yaml +46 -46
- configs/experiment/hrcwhu_unet.yaml +67 -67
- configs/experiment/hrcwhu_unetmobv2.yaml +47 -0
- configs/extras/default.yaml +8 -8
- configs/hparams_search/mnist_optuna.yaml +52 -52
- configs/hydra/default.yaml +19 -19
- configs/logger/aim.yaml +28 -28
- configs/logger/comet.yaml +12 -12
- configs/logger/csv.yaml +7 -7
- configs/logger/many_loggers.yaml +9 -9
- configs/logger/mlflow.yaml +12 -12
- configs/logger/neptune.yaml +9 -9
- configs/logger/tensorboard.yaml +10 -10
- configs/logger/wandb.yaml +16 -16
- configs/model/cdnetv1/README.md +117 -117
- configs/model/cdnetv1/cdnetv1.yaml +19 -19
- configs/model/cdnetv2/README.md +90 -90
- configs/model/cdnetv2/cdnetv2.yaml +18 -18
.env.example CHANGED
@@ -1,6 +1,6 @@
All six lines were removed and re-added with identical content (no textual change). The file reads:

# example of file for storing private and user specific environment variables, like keys or system paths
# rename it to ".env" (excluded from version control by default)
# .env is loaded by train.py automatically
# hydra allows you to reference variables in .yaml configs with special syntax: ${oc.env:MY_VAR}

MY_VAR="/home/user/my/system/path"
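The `${oc.env:MY_VAR}` syntax above is OmegaConf's environment-variable resolver, which Hydra configs can use directly. A minimal standalone sketch of how it resolves (illustrative only, not code from this repository):

```python
# Minimal sketch of the ${oc.env:...} resolver mentioned in .env.example.
import os

from omegaconf import OmegaConf

os.environ["MY_VAR"] = "/home/user/my/system/path"
cfg = OmegaConf.create({"data_dir": "${oc.env:MY_VAR}"})
print(cfg.data_dir)  # -> /home/user/my/system/path (resolved on access)
```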
.gitattributes CHANGED
@@ -1,35 +1,42 @@
Lines 1-35 (the standard Hugging Face Git LFS patterns) are unchanged; seven rules tracking the reference PDFs were appended. The resulting file:

*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
references/\[2017[[:space:]]RSE\][[:space:]]L8_Biome.pdf filter=lfs diff=lfs merge=lfs -text
references/\[2019[[:space:]]ISPRS\][[:space:]]HRC_WHU.pdf filter=lfs diff=lfs merge=lfs -text
references/\[2019[[:space:]]TGRS\][[:space:]]CDnet.pdf filter=lfs diff=lfs merge=lfs -text
references/\[2021[[:space:]]TGRS\][[:space:]]CDnetV2.pdf filter=lfs diff=lfs merge=lfs -text
references/\[2022[[:space:]]TGRS\][[:space:]]DBNet.pdf filter=lfs diff=lfs merge=lfs -text
references/\[2024[[:space:]]ISPRS\][[:space:]]SCNN.pdf filter=lfs diff=lfs merge=lfs -text
references/\[2024[[:space:]]TGRS\][[:space:]]GaoFen12.pdf filter=lfs diff=lfs merge=lfs -text
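The `[[:space:]]` sequences in the new rules are not hand-written: they are how `git lfs track` escapes spaces when it records a pattern in .gitattributes. A hedged sketch of reproducing one such entry (assumes the git and git-lfs CLIs are installed and this runs inside a repository):

```python
# Hedged sketch: `git lfs track` writes the space-escaped rules seen above.
# Assumes git and git-lfs are installed and this runs inside a git repository.
import subprocess

subprocess.run(["git", "lfs", "install"], check=True)
subprocess.run(["git", "lfs", "track", "references/[2019 TGRS] CDnet.pdf"], check=True)
print(open(".gitattributes").read())
# ...references/\[2019[[:space:]]TGRS\][[:space:]]CDnet.pdf filter=lfs diff=lfs merge=lfs -text
```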
.gitignore CHANGED
@@ -1,154 +1,153 @@
Only the tail changed: four trailing blank lines were replaced by an `.env` entry and an `# Aim logging` comment before `.aim`. The resulting file:

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:

local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

### VisualStudioCode
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
*.code-workspace
**/.vscode

# JetBrains
.idea/

# Data & Models
*.h5
*.tar
*.tar.gz

# Lightning-Hydra-Template
configs/local/default.yaml
/data/
.env

# Aim logging
.aim
.pre-commit-config.yaml CHANGED
@@ -1,147 +1,147 @@
All 147 lines were removed and re-added with identical content. The file reads:

default_language_version:
  python: python3

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      # list of supported hooks: https://pre-commit.com/hooks.html
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-docstring-first
      - id: check-yaml
      - id: debug-statements
      - id: detect-private-key
      - id: check-executables-have-shebangs
      - id: check-toml
      - id: check-case-conflict
      - id: check-added-large-files

  # python code formatting
  - repo: https://github.com/psf/black
    rev: 23.1.0
    hooks:
      - id: black
        args: [--line-length, "99"]

  # python import sorting
  - repo: https://github.com/PyCQA/isort
    rev: 5.12.0
    hooks:
      - id: isort
        args: ["--profile", "black", "--filter-files"]

  # python upgrading syntax to newer version
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.3.1
    hooks:
      - id: pyupgrade
        args: [--py38-plus]

  # python docstring formatting
  - repo: https://github.com/myint/docformatter
    rev: v1.7.4
    hooks:
      - id: docformatter
        args:
          [
            --in-place,
            --wrap-summaries=99,
            --wrap-descriptions=99,
            --style=sphinx,
            --black,
          ]

  # python docstring coverage checking
  - repo: https://github.com/econchick/interrogate
    rev: 1.5.0 # or master if you're bold
    hooks:
      - id: interrogate
        args:
          [
            --verbose,
            --fail-under=80,
            --ignore-init-module,
            --ignore-init-method,
            --ignore-module,
            --ignore-nested-functions,
            -vv,
          ]

  # python check (PEP8), programming errors and code complexity
  - repo: https://github.com/PyCQA/flake8
    rev: 6.0.0
    hooks:
      - id: flake8
        args:
          [
            "--extend-ignore",
            "E203,E402,E501,F401,F841,RST2,RST301",
            "--exclude",
            "logs/*,data/*",
          ]
        additional_dependencies: [flake8-rst-docstrings==0.3.0]

  # python security linter
  - repo: https://github.com/PyCQA/bandit
    rev: "1.7.5"
    hooks:
      - id: bandit
        args: ["-s", "B101"]

  # yaml formatting
  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: v3.0.0-alpha.6
    hooks:
      - id: prettier
        types: [yaml]
        exclude: "environment.yaml"

  # shell scripts linter
  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.2
    hooks:
      - id: shellcheck

  # md formatting
  - repo: https://github.com/executablebooks/mdformat
    rev: 0.7.16
    hooks:
      - id: mdformat
        args: ["--number"]
        additional_dependencies:
          - mdformat-gfm
          - mdformat-tables
          - mdformat_frontmatter
          # - mdformat-toc
          # - mdformat-black

  # word spelling linter
  - repo: https://github.com/codespell-project/codespell
    rev: v2.2.4
    hooks:
      - id: codespell
        args:
          - --skip=logs/**,data/**,*.ipynb
          # - --ignore-words-list=abc,def

  # jupyter notebook cell output clearing
  - repo: https://github.com/kynan/nbstripout
    rev: 0.6.1
    hooks:
      - id: nbstripout

  # jupyter notebook linting
  - repo: https://github.com/nbQA-dev/nbQA
    rev: 1.6.3
    hooks:
      - id: nbqa-black
        args: ["--line-length=99"]
      - id: nbqa-isort
        args: ["--profile=black"]
      - id: nbqa-flake8
        args:
          [
            "--extend-ignore=E203,E402,E501,F401,F841",
            "--exclude=logs/*,data/*",
          ]
.project-root CHANGED
@@ -1,2 +1,2 @@
Both lines were re-added unchanged:

# this file is required for inferring the project root directory
# do not delete
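This marker file is the convention Lightning-Hydra-Template projects use to locate the repository root. A minimal sketch of how it is consumed (assuming the `pyrootutils` package, which that template uses; this repo's entry points may differ):

```python
# Minimal sketch (assumes the pyrootutils package, as used by
# Lightning-Hydra-Template): find the project root via the marker file.
import pyrootutils

root = pyrootutils.setup_root(
    search_from=__file__,
    indicator=".project-root",  # the marker file above
    pythonpath=True,  # add the root to sys.path
)
print(root)  # absolute path of the directory that contains .project-root
```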
Makefile CHANGED
@@ -1,30 +1,30 @@
All 30 lines were re-added unchanged. The file reads:

help: ## Show help
	@grep -E '^[.a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

clean: ## Clean autogenerated files
	rm -rf dist
	find . -type f -name "*.DS_Store" -ls -delete
	find . | grep -E "(__pycache__|\.pyc|\.pyo)" | xargs rm -rf
	find . | grep -E ".pytest_cache" | xargs rm -rf
	find . | grep -E ".ipynb_checkpoints" | xargs rm -rf
	rm -f .coverage

clean-logs: ## Clean logs
	rm -rf logs/**

format: ## Run pre-commit hooks
	pre-commit run -a

sync: ## Merge changes from main branch to your current branch
	git pull
	git pull origin main

test: ## Run not slow tests
	pytest -k "not slow"

test-full: ## Run all tests
	pytest

train: ## Train the model
	python src/train.py
README.md CHANGED
@@ -1,104 +1,104 @@
All 104 lines were re-added unchanged (the badge images were stripped by this view; only the bare links remain). The file reads:

---
title: Cloudseg
emoji: 📚
colorFrom: blue
colorTo: red
sdk: gradio
sdk_version: 4.40.0
app_file: app.py
pinned: false
license: apache-2.0
---

# Cloud Segmentation

[](https://huggingface.co/spaces/caixiaoshun/cloudseg)
[](https://github.com/pre-commit/pre-commit)
[](https://pytorch.org/get-started/locally/)
[](https://pytorchlightning.ai/)
[](https://hydra.cc/)
[](https://github.com/XavierJiezou/cloudseg#license)
[](https://github.com/XavierJiezou/cloudseg/graphs/contributors)
[](https://github.com/ashleve/lightning-hydra-template)
[](https://www.nature.com/articles/nature14539)
[](https://papers.nips.cc/paper/2020)

## Datasets

```bash
cloudseg
├── src
├── configs
├── data
│   ├── hrcwhu
│   │   ├── train.txt
│   │   ├── test.txt
│   │   ├── img_dir
│   │   │   ├── train
│   │   │   ├── test
│   │   ├── ann_dir
│   │   │   ├── train
│   │   │   ├── test
```

## Supported Methods

- [UNet (MICCAI 2016)](configs/model/unet)
- [CDNetv1 (TGRS 2019)](configs/model/cdnetv1)
- [CDNetv2 (TGRS 2021)](configs/model/cdnetv2)
- [DBNet (TGRS 2022)](configs/model/dbnet)
- [HrCloudNet (JEI 2024)](configs/model/hrcloudnet)
- [McdNet (International Journal of Applied Earth Observation and Geoinformation 2024)](configs/model/mcdnet)
- [Scnn (ISPRS 2024)](configs/model/scnn)

## Installation

```bash
git clone https://github.com/XavierJiezou/cloudseg.git
cd cloudseg
conda env create -f environment.yaml
conda activate cloudseg
```

## Usage

**Train model with default configuration**

```bash
# train on CPU
python src/train.py trainer=cpu

# train on GPU
python src/train.py trainer=gpu
```

**Train model with chosen experiment configuration from [configs/experiment/](configs/experiment/)**

```bash
python src/train.py experiment=experiment_name.yaml
```

**Train Example**

```bash
python src/train.py experiment=hrcwhu_cdnetv1.yaml
```

**You can override any parameter from command line like this**

```bash
python src/train.py trainer.max_epochs=20 data.batch_size=64
```

**Visualization in wandb**

```bash
python wand_vis.py --model-name model_name
```

**Example**

```bash
python wand_vis.py --model-name cdnetv1
```
app.py CHANGED
@@ -1,140 +1,142 @@
The old version is only partially visible in this view (stray fragments such as `self.`, `x =`, and `colors_mask[:, :,` are all that survive of it). The new version registers UNetMobV2 alongside the existing models; it is reproduced below with Chinese docstrings and UI strings translated, and with two apparent slips corrected: the Radio/example choice `hrcloud` (the key in `self.models` is `hrcloudnet`) and the missing `self` parameter of `tiff_to_png`.

# -*- coding: utf-8 -*-
# @Time    : 2024/8/4 2:38 PM
# @Author  : xiaoshun
# @Email   : [email protected]
# @File    : app.py
# @Software: PyCharm

from glob import glob

import albumentations as albu
import gradio as gr
import numpy as np
import torch
from PIL import Image
from albumentations.pytorch.transforms import ToTensorV2

from src.models.components.cdnetv1 import CDnetV1
from src.models.components.cdnetv2 import CDnetV2
from src.models.components.dbnet import DBNet
from src.models.components.hrcloudnet import HRCloudNet
from src.models.components.mcdnet import MCDNet
from src.models.components.scnn import SCNN
from src.models.components.unetmobv2 import UNetMobV2


class Application:
    def __init__(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.models = {
            "cdnetv1": CDnetV1(num_classes=2).to(self.device),
            "cdnetv2": CDnetV2(num_classes=2).to(self.device),
            "hrcloudnet": HRCloudNet(num_classes=2).to(self.device),
            "mcdnet": MCDNet(in_channels=3, num_classes=2).to(self.device),
            "scnn": SCNN(num_classes=2).to(self.device),
            "dbnet": DBNet(img_size=256, in_channels=3, num_classes=2).to(
                self.device
            ),
            "unetmobv2": UNetMobV2(num_classes=2).to(self.device),
        }
        self.__load_weight()
        self.transform = albu.Compose(
            [
                albu.Resize(256, 256, always_apply=True),
                ToTensorV2(),
            ]
        )

    def __load_weight(self):
        """Load the trained weights for every registered model."""
        for model_name, model in self.models.items():
            weight_path = glob(
                f"logs/train/runs/*{model_name}*/*/checkpoints/*epoch*.ckpt"
            )[0]
            weight = torch.load(weight_path, map_location=self.device)
            state_dict = {}
            for key, value in weight["state_dict"].items():
                # strip the LightningModule attribute prefix (e.g. "net.")
                new_key = key[4:]
                state_dict[new_key] = value
            model.load_state_dict(state_dict)
            model.eval()
            print(f"{model_name} weight loaded!")

    @torch.no_grad
    def inference(self, image: torch.Tensor, model_name: str):
        x = image.float()
        x = x.unsqueeze(0)
        x = x.to(self.device)
        logits = self.models[model_name](x)
        if isinstance(logits, tuple):
            logits = logits[0]
        fake_mask = torch.argmax(logits, 1).detach().cpu().squeeze(0).numpy()
        return fake_mask

    def give_colors_to_mask(self, mask: np.ndarray):
        """Colorize a class-index mask."""
        assert len(mask.shape) == 2, "mask must have shape (height, width)"
        colors_mask = np.zeros((mask.shape[0], mask.shape[1], 3)).astype(np.float32)
        colors = ((255, 255, 255), (128, 192, 128))
        for color in range(2):
            segc = mask == color
            colors_mask[:, :, 0] += segc * (colors[color][0])
            colors_mask[:, :, 1] += segc * (colors[color][1])
            colors_mask[:, :, 2] += segc * (colors[color][2])
        return colors_mask

    def to_pil(self, image: np.ndarray, width=None, height=None):
        colors_np = self.give_colors_to_mask(image)
        pil_np = Image.fromarray(np.uint8(colors_np))
        if width and height:
            pil_np = pil_np.resize((width, height))
        return pil_np

    def flip(self, image_pil: Image.Image, model_name: str):
        if image_pil is None:
            return Image.fromarray(np.uint8(np.random.random((32, 32, 3)) * 255)), "Please upload an image"
        if model_name is None:
            return Image.fromarray(np.uint8(np.random.random((32, 32, 3)) * 255)), "Please select a model name"
        image = np.array(image_pil)
        raw_height, raw_width = image.shape[0], image.shape[1]
        transform = self.transform(image=image)
        image = transform["image"]
        image = image / 255.0
        fake_image = self.inference(image, model_name)
        fake_image = self.to_pil(fake_image, raw_width, raw_height)
        return fake_image, "success"

    def tiff_to_png(self, image: Image.Image):
        if image.format == "TIFF":
            image = image.convert("RGB")
        return np.array(image)

    def run(self):
        app = gr.Interface(
            self.flip,
            [
                gr.Image(sources=["clipboard", "upload"], type="pil"),
                gr.Radio(
                    ["cdnetv1", "cdnetv2", "hrcloudnet", "mcdnet", "scnn", "dbnet", "unetmobv2"],
                    label="model_name",
                    info="Select which model to use",
                ),
            ],
            [gr.Image(), gr.Textbox(label="Message")],
            examples=[
                ["examples_png/barren_11.png", "dbnet"],
                ["examples_png/snow_10.png", "scnn"],
                ["examples_png/vegetation_21.png", "cdnetv2"],
                ["examples_png/water_22.png", "hrcloudnet"],
            ],
            title="Cloud Detection Model Online Demo",
            submit_btn=gr.Button("Submit", variant="primary"),
        )
        app.launch(share=True)


if __name__ == "__main__":
    app = Application()
    app.run()
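The `key[4:]` rewrite in `__load_weight` assumes every checkpoint key starts with a four-character attribute prefix such as `net.` (i.e. the LightningModule stored the network in `self.net`). A toy sketch of that assumption, with hypothetical keys:

```python
# Toy sketch (hypothetical keys): strip a LightningModule's "net." prefix so
# the bare nn.Module can load the checkpoint's state_dict.
ckpt_keys = ["net.encoder.conv1.weight", "net.decoder.out.bias"]
state_dict = {key[4:]: key for key in ckpt_keys}  # key[4:] drops "net."
print(state_dict)
# {'encoder.conv1.weight': 'net.encoder.conv1.weight',
#  'decoder.out.bias': 'net.decoder.out.bias'}
```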
configs/__init__.py CHANGED
@@ -1 +1 @@
The single comment line was re-added unchanged:

# this file is needed here to include configs when building project as a package
configs/callbacks/default.yaml CHANGED
@@ -1,22 +1,22 @@
All 22 lines were re-added unchanged. The file reads:

defaults:
  - model_checkpoint
  - early_stopping
  - model_summary
  - rich_progress_bar
  - _self_

model_checkpoint:
  dirpath: ${paths.output_dir}/checkpoints
  filename: "epoch_{epoch:03d}"
  monitor: "val/acc"
  mode: "max"
  save_last: True
  auto_insert_metric_name: False

early_stopping:
  monitor: "val/acc"
  patience: 100
  mode: "max"

model_summary:
  max_depth: -1
configs/callbacks/early_stopping.yaml CHANGED
@@ -1,15 +1,15 @@
All 15 lines were re-added unchanged. The file reads:

# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.EarlyStopping.html

early_stopping:
  _target_: lightning.pytorch.callbacks.EarlyStopping
  monitor: ??? # quantity to be monitored, must be specified !!!
  min_delta: 0. # minimum change in the monitored quantity to qualify as an improvement
  patience: 3 # number of checks with no improvement after which training will be stopped
  verbose: False # verbosity mode
  mode: "min" # "max" means higher metric value is better, can be also "min"
  strict: True # whether to crash the training if monitor is not found in the validation metrics
  check_finite: True # when set True, stops training when the monitor becomes NaN or infinite
  stopping_threshold: null # stop training immediately once the monitored quantity reaches this threshold
  divergence_threshold: null # stop training as soon as the monitored quantity becomes worse than this threshold
  check_on_train_epoch_end: null # whether to run early stopping at the end of the training epoch
  # log_rank_zero_only: False # this keyword argument isn't available in stable version
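Each of these callback configs names its class in `_target_`; Hydra instantiates that class with the remaining keys as constructor arguments. A minimal standalone sketch of the mechanism (illustrative values; not this repo's training code):

```python
# Minimal standalone sketch of how a `_target_` callback config is typically
# instantiated (Lightning-Hydra-Template style).
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "_target_": "lightning.pytorch.callbacks.EarlyStopping",
        "monitor": "val/acc",
        "patience": 100,
        "mode": "max",
    }
)
early_stopping = instantiate(cfg)  # builds the class named in _target_
print(type(early_stopping).__name__)  # -> EarlyStopping
```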
configs/callbacks/model_checkpoint.yaml CHANGED
@@ -1,17 +1,17 @@
All 17 lines were re-added unchanged. The file reads:

# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html

model_checkpoint:
  _target_: lightning.pytorch.callbacks.ModelCheckpoint
  dirpath: null # directory to save the model file
  filename: null # checkpoint filename
  monitor: null # name of the logged metric which determines when model is improving
  verbose: False # verbosity mode
  save_last: null # additionally always save an exact copy of the last checkpoint to a file last.ckpt
  save_top_k: 1 # save k best models (determined by above metric)
  mode: "min" # "max" means higher metric value is better, can be also "min"
  auto_insert_metric_name: True # when True, the checkpoints filenames will contain the metric name
  save_weights_only: False # if True, then only the model’s weights will be saved
  every_n_train_steps: null # number of training steps between checkpoints
  train_time_interval: null # checkpoints are monitored at the specified time interval
  every_n_epochs: null # number of epochs between checkpoints
  save_on_train_epoch_end: null # whether to run checkpointing at the end of the training epoch or the end of validation
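The `filename` value is a template that Lightning fills in when a checkpoint is written; with `auto_insert_metric_name: False` (as in default.yaml above), the `epoch_{epoch:03d}` override produces names like `epoch_007.ckpt`. A rough, simplified sketch of the substitution (Lightning's real formatting logic does more, e.g. metric-name insertion):

```python
# Rough, simplified sketch of ModelCheckpoint filename templating.
template = "epoch_{epoch:03d}"
print(template.format(epoch=7) + ".ckpt")  # -> epoch_007.ckpt
```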
configs/callbacks/model_summary.yaml CHANGED
@@ -1,5 +1,5 @@
All 5 lines were re-added unchanged. The file reads:

# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html

model_summary:
  _target_: lightning.pytorch.callbacks.RichModelSummary
  max_depth: 1 # the maximum depth of layer nesting that the summary will include
configs/callbacks/rich_progress_bar.yaml CHANGED
@@ -1,4 +1,4 @@
All 4 lines were re-added unchanged. The file reads:

# https://lightning.ai/docs/pytorch/latest/api/lightning.pytorch.callbacks.RichProgressBar.html

rich_progress_bar:
  _target_: lightning.pytorch.callbacks.RichProgressBar
configs/data/CloudSEN12/README.md CHANGED
@@ -1,52 +1,52 @@
All 52 lines were re-added unchanged. The file reads:

# CloudSEN12

> [CloudSEN12, a global dataset for semantic understanding of cloud and cloud shadow in Sentinel-2](https://www.nature.com/articles/s41597-022-01878-2)

## Introduction

- [Official Site](https://cloudsen12.github.io/download.html)
- [Paper Download](https://www.nature.com/articles/s41597-022-01878-2.pdf)
- Data Download: [Hugging Face](https://huggingface.co/datasets/csaybar/CloudSEN12-high)

## Abstract

Accurately characterizing clouds and their shadows is a long-standing problem in the Earth Observation community. Recent works showcase the necessity to improve cloud detection methods for imagery acquired by the Sentinel-2 satellites. However, the lack of consensus and transparency in existing reference datasets hampers the benchmarking of current cloud detection methods. Exploiting the analysis-ready data offered by the Copernicus program, we created CloudSEN12, a new multi-temporal global dataset to foster research in cloud and cloud shadow detection. CloudSEN12 has 49,400 image patches, including Sentinel-2 level-1C and level-2A multi-spectral data, Sentinel-1 synthetic aperture radar data, auxiliary remote sensing products, different hand-crafted annotations to label the presence of thick and thin clouds and cloud shadows, and the results from eight state-of-the-art cloud detection algorithms. At present, CloudSEN12 exceeds all previous efforts in terms of annotation richness, scene variability, geographic distribution, metadata complexity, quality control, and number of samples.

## Dataset

CloudSEN12 is a LARGE dataset (~1 TB) for cloud semantic understanding that consists of 49,400 image patches (IP) that are evenly spread throughout all continents except Antarctica. Each IP covers 5090 x 5090 meters and contains data from Sentinel-2 levels 1C and 2A, hand-crafted annotations of thick and thin clouds and cloud shadows, Sentinel-1 Synthetic Aperture Radar (SAR), digital elevation model, surface water occurrence, land cover classes, and cloud mask results from six cutting-edge cloud detection algorithms.



```
name: CloudSEN12
source: Sentinel-1,2
band: 12
resolution: 10m
pixel: 512x512
train: 8490
val: 535
test: 975
disk: (~1 TB)
annotation:
  - 0: Clear
  - 1: Thick cloud
  - 2: Thin cloud
  - 3: Cloud shadow
scene: -
```

## Citation

```
@article{cloudsen12,
  title={CloudSEN12, a global dataset for semantic understanding of cloud and cloud shadow in Sentinel-2},
  author={Aybar, Cesar and Ysuhuaylas, Luis and Loja, Jhomira and Gonzales, Karen and Herrera, Fernando and Bautista, Lesly and Yali, Roy and Flores, Angie and Diaz, Lissette and Cuenca, Nicole and others},
  journal={Scientific data},
  volume={9},
  number={1},
  pages={782},
  year={2022},
  publisher={Nature Publishing Group UK London}
}
```
configs/data/GF12-MS-WHU/README.md
CHANGED
@@ -1,72 +1,72 @@
|
|
1 |
-
# GaoFen12
|
2 |
-
|
3 |
-
> [Transferring Deep Models for Cloud Detection in Multisensor Images via Weakly Supervised Learning](https://ieeexplore.ieee.org/document/10436637)
|
4 |
-
|
5 |
-
## Introduction
|
6 |
-
|
7 |
-
- [Official Site](https://github.com/whu-ZSC/GF1-GF2MS-WHU)
|
8 |
-
- [Paper Download](https://zhiweili.net/assets/pdf/2024.2_TGRS_Transferring%20Deep%20Models%20for%20Cloud%20Detection%20in%20Multisensor%20Images%20via%20Weakly%20Supervised%20Learning.pdf)
|
9 |
-
- Data Download: [Baidu Disk](https://pan.baidu.com/s/1kBpym0mW_TS9YL1GQ9t8Hw) (password: 9zuf)
|
10 |
-
|
11 |
-
## Abstract
|
12 |
-
|
13 |
-
# GaoFen12

> [Transferring Deep Models for Cloud Detection in Multisensor Images via Weakly Supervised Learning](https://ieeexplore.ieee.org/document/10436637)

## Introduction

- [Official Site](https://github.com/whu-ZSC/GF1-GF2MS-WHU)
- [Paper Download](https://zhiweili.net/assets/pdf/2024.2_TGRS_Transferring%20Deep%20Models%20for%20Cloud%20Detection%20in%20Multisensor%20Images%20via%20Weakly%20Supervised%20Learning.pdf)
- Data Download: [Baidu Disk](https://pan.baidu.com/s/1kBpym0mW_TS9YL1GQ9t8Hw) (password: 9zuf)

## Abstract

Recently, deep learning has been widely used for cloud detection in satellite images; however, due to radiometric and spatial resolution differences between sensors and the time-consuming process of manually labeling cloud detection datasets, it is difficult to effectively generalize deep learning models for cloud detection in multisensor images. This article proposes a weakly supervised learning method for transferring deep models for cloud detection in multisensor images (TransMCD), which leverages the generalization of deep models and the spectral features of clouds to construct a pseudo-label dataset that improves the generalization of the models. A deep model is first pretrained on a well-annotated cloud detection dataset and used to obtain a rough cloud mask of an unlabeled target image. The rough mask is then used to adaptively determine the spectral threshold for cloud segmentation of the target image. Block-level pseudo labels with high confidence in the target image are selected using the rough mask and the spectral mask, and an unsupervised segmentation technique is used to construct a high-quality pixel-level pseudo-label dataset. Finally, the pseudo-label dataset serves as supervised information for transferring the pretrained model to the target image. The TransMCD method was validated by transferring a model trained on 16-m Gaofen-1 wide field of view (WFV) images to 8-m Gaofen-1, 4-m Gaofen-2, and 10-m Sentinel-2 images. The F1-scores of the transferred models on the target images improve by 1.23%–9.63% over the pretrained models and are comparable to those of fully supervised models trained on well-annotated target images, suggesting the efficiency of the TransMCD method for cloud detection in multisensor images.
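
The adaptive-threshold step above is the core of the pseudo-labeling pipeline; below is a minimal NumPy sketch of one way such a threshold could be derived (the function and variable names are ours, not from the authors' code):

```python
import numpy as np

def adaptive_cloud_threshold(band: np.ndarray, rough_mask: np.ndarray) -> float:
    """Derive a spectral threshold from the rough cloud mask predicted by the
    pretrained model: here, simply the midpoint between the mean reflectance
    of rough-cloud pixels and rough-clear pixels."""
    cloud_mean = band[rough_mask == 1].mean()
    clear_mean = band[rough_mask == 0].mean()
    return float(cloud_mean + clear_mean) / 2.0

# spectral mask for the unlabeled target image:
# spectral_mask = (band > adaptive_cloud_threshold(band, rough_mask)).astype(np.uint8)
```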

## Dataset

### GF1MS-WHU Dataset

> The two GF-1 PMS sensors have four MS bands with an 8-m spatial resolution and a panchromatic (PAN) band with a higher spatial resolution of 2 m. The spectral range of the MS bands is identical to that of the WFV sensors. In this study, 141 unlabeled images collected from various regions in China were used as the training data for the proposed method. In addition, 33 labeled images were used as the training data for the fully supervised methods, as well as the validation data for the different methods. The acquisition of the images spanned from June 2014 to December 2020 and encompassed four MS bands in both PMS sensors. Note that Fig. 7 only presents the distribution regions of the labeled images.

```yaml
name: GF1MS-WHU
source: GaoFen-1
band: 4 (MS)
resolution: 8m (MS), 2m (PAN)
pixel: 250x250
train: 6343
val: -
test: 4085
disk: 10.8GB
annotation:
  - 0: clear sky
  - 1: cloud
scene: [Forest, Urban, Barren, Water, Farmland, Grass, Wetland]
```

### GF2MS-WHU Dataset

> The GF-2 satellite is configured with two PMS sensors. Each sensor has four MS bands with a 4-m spatial resolution and a PAN band with a 1-m spatial resolution. The GF-2 PMS sensors have the same bandwidth as the GF-1 WFV sensors. In this study, 163 unlabeled images obtained from Hubei, Jilin, and Hainan provinces were used as the training data for the proposed method, and 29 labeled images were used as the training data for the fully supervised methods, as well as the validation data for the different methods. The images were acquired from June 2014 to October 2020 and included four MS bands in both PMS sensors.

```yaml
name: GF2MS-WHU
source: GaoFen-2
band: 4 (MS)
resolution: 4m (MS), 1m (PAN)
pixel: 250x250
train: 14357
val: -
test: 7560
disk: 26.7GB
annotation:
  - 0: clear sky
  - 1: cloud
scene: [Forest, Urban, Barren, Water, Farmland, Grass, Wetland]
```

## Citation

```bibtex
@ARTICLE{gaofen12,
  author={Zhu, Shaocong and Li, Zhiwei and Shen, Huanfeng},
  journal={IEEE Transactions on Geoscience and Remote Sensing},
  title={Transferring Deep Models for Cloud Detection in Multisensor Images via Weakly Supervised Learning},
  year={2024},
  volume={62},
  number={},
  pages={1-18},
  keywords={Cloud computing;Clouds;Sensors;Predictive models;Supervised learning;Image segmentation;Deep learning;Cloud detection;deep learning;multisensor images;weakly supervised learning},
  doi={10.1109/TGRS.2024.3358824}
}
```
configs/data/L8-Biome/README.md
CHANGED
@@ -1,56 +1,56 @@
# L8-Biome

> [Cloud detection algorithm comparison and validation for operational Landsat data products](https://www.sciencedirect.com/science/article/abs/pii/S0034425717301293)

## Introduction

- [Official Site](https://landsat.usgs.gov/landsat-8-cloud-cover-assessment-validation-data)
- [Paper Download](https://gerslab.cahnr.uconn.edu/wp-content/uploads/sites/2514/2021/06/1-s2.0-S0034425717301293-Steve_Foga_cloud_detection_2017.pdf)
- Data Download: [USGS](https://landsat.usgs.gov/landsat-8-cloud-cover-assessment-validation-data)

## Abstract

Clouds are a pervasive and unavoidable issue in satellite-borne optical imagery. Accurate, well-documented, and automated cloud detection algorithms are necessary to effectively leverage large collections of remotely sensed data. The Landsat project is uniquely suited for comparative validation of cloud assessment algorithms because the modular architecture of the Landsat ground system allows for quick evaluation of new code, and because Landsat has the most comprehensive manual truth masks of any current satellite data archive. Currently, the Landsat Level-1 Product Generation System (LPGS) uses separate algorithms for determining clouds, cirrus clouds, and snow and/or ice probability on a per-pixel basis. With more bands onboard the Landsat 8 Operational Land Imager (OLI)/Thermal Infrared Sensor (TIRS) satellite, and a greater number of cloud masking algorithms, the U.S. Geological Survey (USGS) is replacing the current cloud masking workflow with a more robust algorithm that is capable of working across multiple Landsat sensors with minimal modification. Because of the inherent error from stray light and intermittent data availability of TIRS, these algorithms need to operate both with and without thermal data. In this study, we created a workflow to evaluate cloud and cloud shadow masking algorithms using cloud validation masks manually derived from both Landsat 7 Enhanced Thematic Mapper Plus (ETM+) and Landsat 8 OLI/TIRS data. We created a new validation dataset consisting of 96 Landsat 8 scenes, representing different biomes and proportions of cloud cover. We evaluated algorithm performance by overall accuracy, omission error, and commission error for both cloud and cloud shadow. We found that CFMask, C code based on the Function of Mask (Fmask) algorithm, and its confidence bands have the best overall accuracy among the many algorithms tested using our validation data. The Artificial Thermal-Automated Cloud Cover Algorithm (AT-ACCA) is the most accurate nonthermal-based algorithm. We give preference to CFMask for operational cloud and cloud shadow detection, as it is derived from a priori knowledge of physical phenomena and is operable without geographic restriction, making it useful for current and future land imaging missions without having to be retrained in a machine-learning environment.

## Dataset

This collection contains 96 Pre-Collection Landsat 8 Operational Land Imager (OLI)/Thermal Infrared Sensor (TIRS) terrain-corrected (Level-1T) scenes, displayed in the biomes listed below. Manually generated cloud masks are used to validate cloud cover assessment algorithms, which in turn are intended to compute the percentage of cloud cover in each scene.

![L8-Biome](https://github.com/user-attachments/assets/f6cd4f34-72bb-4276-a7e4-2ca6c337d0a4)

```yaml
name: l8_biome
source: Landsat-8 OLI/TIRS
band: 9
resolution: 30m
pixel: ∼7000 × 6000
train: -
val: -
test: -
disk: 88GB
annotation:
  - 0: Fill
  - 64: Cloud Shadow
  - 128: Clear
  - 192: Thin Cloud
  - 255: Cloud
scene: [Barren, Forest, Grass/Crops, Shrubland, Snow/Ice, Urban, Water, Wetlands]
```
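
The annotation values above are grayscale codes rather than class indices. A minimal sketch (ours, not part of the dataset tooling) for collapsing them into a binary cloud mask:

```python
import numpy as np

# L8-Biome mask codes: 0 = fill, 64 = cloud shadow, 128 = clear, 192 = thin cloud, 255 = cloud
def to_binary_cloud_mask(mask: np.ndarray) -> np.ndarray:
    """Map thin cloud (192) and cloud (255) to 1, everything else to 0."""
    return np.isin(mask, (192, 255)).astype(np.uint8)
```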

## Citation

```bibtex
@article{l8biome,
  title = {Cloud detection algorithm comparison and validation for operational Landsat data products},
  journal = {Remote Sensing of Environment},
  volume = {194},
  pages = {379-390},
  year = {2017},
  issn = {0034-4257},
  doi = {https://doi.org/10.1016/j.rse.2017.03.026},
  url = {https://www.sciencedirect.com/science/article/pii/S0034425717301293},
  author = {Steve Foga and Pat L. Scaramuzza and Song Guo and Zhe Zhu and Ronald D. Dilley and Tim Beckmann and Gail L. Schmidt and John L. Dwyer and M. {Joseph Hughes} and Brady Laue},
  keywords = {Landsat, CFMask, Cloud detection, Cloud validation masks, Biome sampling, Data products},
}
```
configs/data/celeba.yaml
CHANGED
@@ -1,8 +1,8 @@
_target_: src.data.celeba_datamodule.CelebADataModule
size: 512 # image size
test_dataset_size: 3000
conditions: [] # [] for image-only, ['seg_mask', 'text'] for multi-modal conditions, ['seg_mask'] for segmentation mask only, ['text'] for text only
batch_size: 2
num_workers: 8
pin_memory: False
persistent_workers: False
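
Like the other data configs in this repo, this file is consumed through Hydra's `_target_` mechanism; a minimal sketch of instantiating it outside a full training run:

```python
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("configs/data/celeba.yaml")
datamodule = instantiate(cfg)  # builds CelebADataModule(size=512, batch_size=2, ...)
```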
configs/data/hrcwhu/README.md
CHANGED
@@ -1,56 +1,56 @@
# HRC_WHU

> [Deep learning based cloud detection for medium and high resolution remote sensing images of different sensors](https://www.sciencedirect.com/science/article/pii/S0924271619300565)

## Introduction

- [Official Site](http://sendimage.whu.edu.cn/en/hrc_whu/)
- [Paper Download](http://sendimage.whu.edu.cn/en/wp-content/uploads/2019/03/2019_PHOTO_Zhiwei-Li_Deep-learning-based-cloud-detection-for-medium-and-high-resolution-remote-sensing-images-of-different-sensors.pdf)
- Data Download: [Baidu Disk](https://pan.baidu.com/s/1thOTKVO2iTAalFAjFI2_ZQ) (password: ihfb) or [Google Drive](https://drive.google.com/file/d/1qqikjaX7tkfOONsF5EtR4vl6J7sToA6p/view?usp=sharing)

## Abstract

Cloud detection is an important preprocessing step for the precise application of optical satellite imagery. In this paper, we propose a deep learning based cloud detection method named multi-scale convolutional feature fusion (MSCFF) for remote sensing images of different sensors. In the network architecture of MSCFF, the symmetric encoder-decoder module, which provides both local and global context by densifying feature maps with trainable convolutional filter banks, is utilized to extract multi-scale and high-level spatial features. The feature maps of multiple scales are then up-sampled and concatenated, and a novel multi-scale feature fusion module is designed to fuse the features of different scales for the output. The two output feature maps of the network are cloud and cloud shadow maps, which are in turn fed to binary classifiers outside the model to obtain the final cloud and cloud shadow mask. The MSCFF method was validated on hundreds of globally distributed optical satellite images, with spatial resolutions ranging from 0.5 to 50 m, including Landsat-5/7/8, Gaofen-1/2/4, Sentinel-2, Ziyuan-3, CBERS-04, Huanjing-1, and collected high-resolution images exported from Google Earth. The experimental results show that MSCFF achieves a higher accuracy than the traditional rule-based cloud detection methods and the state-of-the-art deep learning models, especially in bright surface covered areas. The effectiveness of MSCFF means that it has great promise for the practical application of cloud detection for multiple types of medium and high-resolution remote sensing images. Our established global high-resolution cloud detection validation dataset has been made available online (http://sendimage.whu.edu.cn/en/mscff/).
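
As a rough illustration of the upsample-and-concatenate fusion idea described in the abstract (a toy PyTorch sketch of the generic pattern, not the authors' MSCFF implementation):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyMultiScaleFusion(nn.Module):
    """Upsample feature maps from several scales to the finest resolution,
    concatenate them along channels, and fuse with a 1x1 convolution."""

    def __init__(self, channels_per_scale, out_channels):
        super().__init__()
        self.fuse = nn.Conv2d(sum(channels_per_scale), out_channels, kernel_size=1)

    def forward(self, features):  # features: list of (N, C_i, H_i, W_i), finest scale first
        size = features[0].shape[-2:]
        up = [F.interpolate(f, size=size, mode="bilinear", align_corners=False) for f in features]
        return self.fuse(torch.cat(up, dim=1))
```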

## Dataset

The high-resolution cloud cover validation dataset was created by the SENDIMAGE Lab at Wuhan University and has been termed HRC_WHU. The HRC_WHU data comprise 150 high-resolution images acquired with three RGB channels and a resolution varying from 0.5 to 15 m in different global regions. As shown in Fig. 1, the images were collected from Google Earth (Google Inc.) in five main land-cover types, i.e., water, vegetation, urban, snow/ice, and barren. The associated reference cloud masks were digitized by experts in the field of remote sensing image interpretation. The established high-resolution cloud cover validation dataset has been made available online.

![HRC_WHU](https://github.com/user-attachments/assets/feff4bbe-e2ef-4bcc-bd03-4dde0327f165)

```yaml
name: hrc_whu
source: google earth
band: 3 (rgb)
resolution: 0.5m-15m
pixel: 1280x720
train: 120
val: null
test: 30
disk: 168mb
annotation:
  - 0: clear sky
  - 1: cloud
scene: [water, vegetation, urban, snow/ice, barren]
```

## Annotation

In the procedure of delineating the cloud mask for high-resolution imagery, we first stretched the cloudy image to the appropriate contrast in Adobe Photoshop. The lasso tool and magic wand tool were then alternately used to mark the locations of the clouds in the image. The manually labeled reference mask was finally created by assigning the pixel values of cloud and clear sky to 255 and 0, respectively. Note that a tolerance of 5–30 was set when using the magic wand tool, and the lasso tool was used to modify the areas that could not be correctly selected by the magic wand tool. As we did in a previous study (Li et al., 2017), thin clouds were labeled as cloud if they were visually identifiable and the underlying surface could not be seen clearly. Considering that cloud shadows in high-resolution images are rare and hard to accurately select, only clouds were labeled in the reference masks.

## Citation

```bibtex
@article{hrc_whu,
  title = {Deep learning based cloud detection for medium and high resolution remote sensing images of different sensors},
  journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
  volume = {150},
  pages = {197-212},
  year = {2019},
  issn = {0924-2716},
  doi = {https://doi.org/10.1016/j.isprsjprs.2019.02.017},
  url = {https://www.sciencedirect.com/science/article/pii/S0924271619300565},
  author = {Zhiwei Li and Huanfeng Shen and Qing Cheng and Yuhao Liu and Shucheng You and Zongyi He},
  keywords = {Cloud detection, Cloud shadow, Convolutional neural network, Multi-scale, Convolutional feature fusion, MSCFF}
}
```
configs/data/hrcwhu/hrcwhu.yaml
CHANGED
@@ -1,89 +1,89 @@
_target_: src.data.hrcwhu_datamodule.HRCWHUDataModule
root: data/hrcwhu
train_pipeline:
  all_transform:
    _target_: albumentations.Compose
    transforms:
      - _target_: albumentations.HorizontalFlip
        p: 0.5
      - _target_: albumentations.ShiftScaleRotate
        p: 1
      - _target_: albumentations.RandomCrop
        height: 256
        width: 256
        always_apply: true
      - _target_: albumentations.GaussNoise
        p: 0.2
      - _target_: albumentations.Perspective
        p: 0.5
      - _target_: albumentations.OneOf
        transforms:
          - _target_: albumentations.CLAHE
            p: 1
          - _target_: albumentations.RandomGamma
            p: 1
        p: 0.9

      - _target_: albumentations.OneOf
        transforms:
          - _target_: albumentations.Sharpen
            p: 1
          - _target_: albumentations.Blur
            p: 1
          - _target_: albumentations.MotionBlur
            p: 1
        p: 0.9

      - _target_: albumentations.OneOf
        transforms:
          - _target_: albumentations.RandomBrightnessContrast
            p: 1
          - _target_: albumentations.HueSaturationValue
            p: 1
        p: 0.9

  img_transform:
    _target_: albumentations.Compose
    transforms:
      - _target_: albumentations.ToFloat
        max_value: 255.0
      - _target_: albumentations.pytorch.transforms.ToTensorV2

  ann_transform: null

val_pipeline:
  all_transform:
    _target_: albumentations.Compose
    transforms:
      - _target_: albumentations.Resize
        height: 256
        width: 256

  img_transform:
    _target_: albumentations.Compose
    transforms:
      - _target_: albumentations.ToFloat
        max_value: 255.0
      - _target_: albumentations.pytorch.transforms.ToTensorV2
  ann_transform: null

test_pipeline:
  all_transform:
    _target_: albumentations.Compose
    transforms:
      - _target_: albumentations.Resize
        height: 256
        width: 256

  img_transform:
    _target_: albumentations.Compose
    transforms:
      - _target_: albumentations.ToFloat
        max_value: 255.0
      - _target_: albumentations.pytorch.transforms.ToTensorV2
  ann_transform: null

seed: 42
batch_size: 8
num_workers: 8
pin_memory: True
persistent_workers: True
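
For reference, the `all_transform` pipelines above are applied jointly to the image and its mask, while `img_transform` runs on the image alone; a minimal albumentations usage sketch (array shapes are assumptions):

```python
import albumentations as A
import numpy as np
from albumentations.pytorch.transforms import ToTensorV2

all_transform = A.Compose([A.HorizontalFlip(p=0.5), A.RandomCrop(height=256, width=256)])
img_transform = A.Compose([A.ToFloat(max_value=255.0), ToTensorV2()])

image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
mask = np.random.randint(0, 2, (512, 512), dtype=np.uint8)

geo = all_transform(image=image, mask=mask)               # same spatial transform for both
img_tensor = img_transform(image=geo["image"])["image"]   # normalization on the image only
ann = geo["mask"]
```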
configs/data/mnist.yaml
CHANGED
@@ -1,6 +1,6 @@
_target_: src.data.mnist_datamodule.MNISTDataModule
data_dir: ${paths.data_dir}
batch_size: 128 # Needs to be divisible by the number of devices (e.g., if in a distributed setup)
train_val_test_split: [55_000, 5_000, 10_000]
num_workers: 0
pin_memory: False
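
The divisibility comment above matters because, in distributed runs, the effective batch is typically split across devices; a hedged sketch of the kind of check a datamodule might perform (names are ours):

```python
def per_device_batch_size(batch_size: int, world_size: int) -> int:
    """Split a global batch size evenly across devices, failing loudly otherwise."""
    if batch_size % world_size != 0:
        raise RuntimeError(
            f"batch_size ({batch_size}) is not divisible by world_size ({world_size})"
        )
    return batch_size // world_size

assert per_device_batch_size(128, 4) == 32
```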
configs/debug/default.yaml
CHANGED
@@ -1,35 +1,35 @@
# @package _global_

# default debugging setup, runs 1 full epoch
# other debugging configs can inherit from this one

# overwrite task name so debugging logs are stored in separate folder
task_name: "debug"

# disable callbacks and loggers during debugging
callbacks: null
logger: null

extras:
  ignore_warnings: False
  enforce_tags: False

# sets level of all command line loggers to 'DEBUG'
# https://hydra.cc/docs/tutorials/basic/running_your_app/logging/
hydra:
  job_logging:
    root:
      level: DEBUG

  # use this to also set hydra loggers to 'DEBUG'
  # verbose: True

trainer:
  max_epochs: 1
  accelerator: cpu # debuggers don't like gpus
  devices: 1 # debuggers don't like multiprocessing
  detect_anomaly: true # raise exception if NaN or +/-inf is detected in any tensor

data:
  num_workers: 0 # debuggers don't like multiprocessing
  pin_memory: False # disable gpu memory pin
configs/debug/fdr.yaml
CHANGED
@@ -1,9 +1,9 @@
# @package _global_

# runs 1 train, 1 validation and 1 test step

defaults:
  - default

trainer:
  fast_dev_run: true
configs/debug/limit.yaml
CHANGED
@@ -1,12 +1,12 @@
# @package _global_

# uses only 1% of the training data and 5% of validation/test data

defaults:
  - default

trainer:
  max_epochs: 3
  limit_train_batches: 0.01
  limit_val_batches: 0.05
  limit_test_batches: 0.05
configs/debug/overfit.yaml
CHANGED
@@ -1,13 +1,13 @@
# @package _global_

# overfits to 3 batches

defaults:
  - default

trainer:
  max_epochs: 20
  overfit_batches: 3

# model ckpt and early stopping need to be disabled during overfitting
callbacks: null
configs/debug/profiler.yaml
CHANGED
@@ -1,12 +1,12 @@
# @package _global_

# runs with execution time profiling

defaults:
  - default

trainer:
  max_epochs: 1
  profiler: "simple"
  # profiler: "advanced"
  # profiler: "pytorch"
configs/eval.yaml
CHANGED
@@ -1,18 +1,18 @@
# @package _global_

defaults:
  - _self_
  - data: mnist # choose datamodule with `test_dataloader()` for evaluation
  - model: mnist
  - logger: null
  - trainer: default
  - paths: default
  - extras: default
  - hydra: default

task_name: "eval"

tags: ["dev"]

# passing checkpoint path is necessary for evaluation
ckpt_path: ???
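
`???` is OmegaConf's marker for a mandatory value, so `ckpt_path` must be supplied at run time; a minimal sketch with Hydra's compose API (the checkpoint path is a placeholder):

```python
from hydra import compose, initialize

with initialize(version_base=None, config_path="configs"):
    cfg = compose(config_name="eval", overrides=["ckpt_path=path/to/checkpoint.ckpt"])

print(cfg.ckpt_path)  # accessing it without an override would raise MissingMandatoryValue
```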
configs/experiment/hrcwhu_cdnetv1.yaml
CHANGED
@@ -1,47 +1,47 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=hrcwhu_cdnetv1

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: cdnetv1/cdnetv1
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "cdnetv1"]

seed: 42

# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   mode: min
#   factor: 0.1
#   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "cdnetv1"
  aim:
    experiment: "hrcwhu_cdnetv1"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
configs/experiment/hrcwhu_cdnetv2.yaml
CHANGED
@@ -1,47 +1,47 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=hrcwhu_cdnetv2

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: cdnetv2/cdnetv2
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "cdnetv2"]

seed: 42

# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   mode: min
#   factor: 0.1
#   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "cdnetv2"
  aim:
    experiment: "hrcwhu_cdnetv2"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
configs/experiment/hrcwhu_dbnet.yaml
CHANGED
@@ -1,48 +1,48 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=hrcwhu_dbnet

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: dbnet/dbnet
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "dbnet"]

seed: 42

# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   mode: min
#   factor: 0.1
#   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "dbnet"
  aim:
    experiment: "hrcwhu_dbnet"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
configs/experiment/hrcwhu_hrcloudnet.yaml
ADDED
@@ -0,0 +1,47 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=hrcwhu_hrcloudnet

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: hrcloudnet/hrcloudnet
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "hrcloudnet"]

seed: 42

# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   mode: min
#   factor: 0.1
#   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "hrcloudnet"
  aim:
    experiment: "hrcwhu_hrcloudnet"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
configs/experiment/hrcwhu_mcdnet.yaml
CHANGED
@@ -1,47 +1,47 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=hrcwhu_mcdnet

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: mcdnet/mcdnet
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "mcdnet"]

seed: 42

# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   mode: min
#   factor: 0.1
#   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "mcdnet"
  aim:
    experiment: "hrcwhu_mcdnet"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
configs/experiment/hrcwhu_scnn.yaml
CHANGED
@@ -1,47 +1,47 @@
# @package _global_

# to execute this experiment run:
# python train.py experiment=hrcwhu_scnn

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: scnn/scnn
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "scnn"]

seed: 42

# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   mode: min
#   factor: 0.1
#   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "scnn"
  aim:
    experiment: "hrcwhu_scnn"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
configs/experiment/hrcwhu_unet.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,68 +1,68 @@

# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: null
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "unet"]

seed: 42

model:
  _target_: src.models.base_module.BaseLitModule

  net:
    _target_: src.models.components.unet.UNet
    in_channels: 3
    out_channels: 2

  num_classes: 2

  criterion:
    _target_: torch.nn.CrossEntropyLoss

  optimizer:
    _target_: torch.optim.SGD
    _partial_: true
    lr: 0.1

  scheduler: null

  # scheduler:
  #   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
  #   _partial_: true
  #   mode: min
  #   factor: 0.1
  #   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "unet"
  aim:
    experiment: "hrcwhu_unet"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
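Hydra materializes nested `_target_` nodes like `model.net` above recursively via `hydra.utils.instantiate`. A runnable sketch with a stand-in target, since `src.models.components.unet.UNet` is this repo's own class:

```python
# Sketch of Hydra's recursive instantiation of a _target_ node.
# torch.nn.Conv2d stands in for src.models.components.unet.UNet so the
# snippet runs without this repo on the path; kernel_size is an extra
# argument Conv2d needs and is not part of the original config.
from hydra.utils import instantiate
from omegaconf import OmegaConf

net_cfg = OmegaConf.create({
    "_target_": "torch.nn.Conv2d",
    "in_channels": 3,
    "out_channels": 2,
    "kernel_size": 1,
})

net = instantiate(net_cfg)
print(net)  # Conv2d(3, 2, kernel_size=(1, 1), stride=(1, 1))
```

Note that the `# python train.py experiment=example` comment is template boilerplate; for this file the override would be `experiment=hrcwhu_unet`, since Hydra selects experiment configs by filename.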
configs/experiment/hrcwhu_unetmobv2.yaml
ADDED
@@ -0,0 +1,47 @@

# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
  - override /trainer: gpu
  - override /data: hrcwhu/hrcwhu
  - override /model: unetmobv2/unetmobv2
  - override /logger: wandb
  - override /callbacks: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["hrcWhu", "unetmobv2"]

seed: 42

# scheduler:
#   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
#   _partial_: true
#   mode: min
#   factor: 0.1
#   patience: 10

logger:
  wandb:
    project: "hrcWhu"
    name: "unetmobv2"
  aim:
    experiment: "hrcwhu_unetmobv2"

callbacks:
  model_checkpoint:
    dirpath: ${paths.output_dir}/checkpoints
    filename: "epoch_{epoch:03d}"
    monitor: "val/loss"
    mode: "min"
    save_last: True
    auto_insert_metric_name: False

  early_stopping:
    monitor: "val/loss"
    patience: 10
    mode: "min"
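Unlike the unet experiment, this new file takes its whole model from `configs/model/unetmobv2/unetmobv2.yaml` through the defaults list. A hedged sketch of inspecting the composed result with Hydra's compose API, assuming the template's usual `configs/train.yaml` entry point and running from the repository root:

```python
# Sketch: compose the experiment config without launching training.
# Assumes this template's layout (configs/train.yaml exists); run from
# the repository root.
from hydra import compose, initialize

with initialize(config_path="configs", version_base=None):
    cfg = compose(config_name="train", overrides=["experiment=hrcwhu_unetmobv2"])

print(cfg.tags)               # ['hrcWhu', 'unetmobv2']
print(cfg.model["_target_"])  # whatever unetmobv2/unetmobv2.yaml declares
```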
configs/extras/default.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,8 +1,8 @@

# disable python warnings if they annoy you
ignore_warnings: False

# ask user for tags if none are provided in the config
enforce_tags: True

# pretty print config tree at the start of the run using Rich library
print_config: True
configs/hparams_search/mnist_optuna.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,52 +1,52 @@

# @package _global_

# example hyperparameter optimization of some experiment with Optuna:
# python train.py -m hparams_search=mnist_optuna experiment=example

defaults:
  - override /hydra/sweeper: optuna

# choose metric which will be optimized by Optuna
# make sure this is the correct name of some metric logged in lightning module!
optimized_metric: "val/acc_best"

# here we define Optuna hyperparameter search
# it optimizes for value returned from function with @hydra.main decorator
# docs: https://hydra.cc/docs/next/plugins/optuna_sweeper
hydra:
  mode: "MULTIRUN" # set hydra to multirun by default if this config is attached

  sweeper:
    _target_: hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper

    # storage URL to persist optimization results
    # for example, you can use SQLite if you set 'sqlite:///example.db'
    storage: null

    # name of the study to persist optimization results
    study_name: null

    # number of parallel workers
    n_jobs: 1

    # 'minimize' or 'maximize' the objective
    direction: maximize

    # total number of runs that will be executed
    n_trials: 20

    # choose Optuna hyperparameter sampler
    # you can choose bayesian sampler (tpe), random search (without optimization), grid sampler, and others
    # docs: https://optuna.readthedocs.io/en/stable/reference/samplers.html
    sampler:
      _target_: optuna.samplers.TPESampler
      seed: 1234
      n_startup_trials: 10 # number of random sampling runs before optimization starts

    # define hyperparameter search space
    params:
      model.optimizer.lr: interval(0.0001, 0.1)
      data.batch_size: choice(32, 64, 128, 256)
      model.net.lin1_size: choice(64, 128, 256)
      model.net.lin2_size: choice(64, 128, 256)
      model.net.lin3_size: choice(32, 64, 128, 256)
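The sweeper's `interval(...)` and `choice(...)` directives correspond to Optuna's float and categorical suggestions. A standalone sketch of the same search space in plain Optuna; the objective body is a placeholder, whereas the real objective is the `optimized_metric` returned from a training run:

```python
# Standalone sketch of the search space above in plain Optuna.
# The objective body is a placeholder, not this repo's training run.
import optuna

def objective(trial: optuna.Trial) -> float:
    lr = trial.suggest_float("model.optimizer.lr", 0.0001, 0.1)                     # interval(...)
    batch_size = trial.suggest_categorical("data.batch_size", [32, 64, 128, 256])   # choice(...)
    lin1 = trial.suggest_categorical("model.net.lin1_size", [64, 128, 256])
    return lr * batch_size / lin1  # placeholder for val/acc_best

study = optuna.create_study(
    direction="maximize",
    sampler=optuna.samplers.TPESampler(seed=1234, n_startup_trials=10),
)
study.optimize(objective, n_trials=20)
print(study.best_params)
```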
configs/hydra/default.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,19 +1,19 @@

# https://hydra.cc/docs/configure_hydra/intro/

# enable color logging
defaults:
  - override hydra_logging: colorlog
  - override job_logging: colorlog

# output directory, generated dynamically on each run
run:
  dir: ${paths.log_dir}/${task_name}/runs/${logger.aim.experiment}/${now:%Y-%m-%d}_${now:%H-%M-%S}
sweep:
  dir: ${paths.log_dir}/${task_name}/multiruns/${logger.aim.experiment}/${now:%Y-%m-%d}_${now:%H-%M-%S}
  subdir: ${hydra.job.num}

job_logging:
  handlers:
    file:
      # Incorporates fix from https://github.com/facebookresearch/hydra/pull/2242
      filename: ${hydra.runtime.output_dir}/${task_name}.log
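The `${now:...}` segments in `run.dir` are resolved at launch time by a resolver Hydra registers itself. A self-contained sketch that registers a stand-in `now` resolver so it runs without Hydra:

```python
# Self-contained sketch of how run.dir resolves. Hydra registers the real
# `now` resolver itself; we register a stand-in so this runs without Hydra.
from datetime import datetime
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("now", lambda fmt: datetime.now().strftime(fmt), replace=True)

cfg = OmegaConf.create({
    "paths": {"log_dir": "logs"},
    "task_name": "train",
    "logger": {"aim": {"experiment": "hrcwhu_unet"}},
    "dir": "${paths.log_dir}/${task_name}/runs/${logger.aim.experiment}/${now:%Y-%m-%d}_${now:%H-%M-%S}",
})
print(cfg.dir)  # e.g. logs/train/runs/hrcwhu_unet/2025-01-01_12-00-00
```

Note that `run.dir` interpolates `${logger.aim.experiment}`, which is why every experiment config above sets `logger.aim.experiment` even when wandb is the active logger override.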
configs/logger/aim.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,28 +1,28 @@

# https://aimstack.io/

# example usage in lightning module:
# https://github.com/aimhubio/aim/blob/main/examples/pytorch_lightning_track.py

# open the Aim UI with the following command (run in the folder containing the `.aim` folder):
# `aim up`

aim:
  _target_: aim.pytorch_lightning.AimLogger
  repo: ${paths.root_dir} # .aim folder will be created here
  # repo: "aim://ip_address:port" # can instead provide IP address pointing to Aim remote tracking server which manages the repo, see https://aimstack.readthedocs.io/en/latest/using/remote_tracking.html#

  # aim allows to group runs under experiment name
  experiment: null # any string, set to "default" if not specified

  train_metric_prefix: "train/"
  val_metric_prefix: "val/"
  test_metric_prefix: "test/"

  # sets the tracking interval in seconds for system usage metrics (CPU, GPU, memory, etc.)
  system_tracking_interval: 10 # set to null to disable system metrics tracking

  # enable/disable logging of system params such as installed packages, git info, env vars, etc.
  log_system_params: true

  # enable/disable tracking console logs (default value is true)
  capture_terminal_logs: false # set to false to avoid infinite console log loop issue https://github.com/aimhubio/aim/issues/2550
configs/logger/comet.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,12 +1,12 @@

# https://www.comet.ml

comet:
  _target_: lightning.pytorch.loggers.comet.CometLogger
  api_key: ${oc.env:COMET_API_TOKEN} # api key is loaded from environment variable
  save_dir: "${paths.output_dir}"
  project_name: "lightning-hydra-template"
  rest_api_key: null
  # experiment_name: ""
  experiment_key: null # set to resume experiment
  offline: False
  prefix: ""
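`${oc.env:COMET_API_TOKEN}` is OmegaConf's built-in environment-variable resolver, evaluated when the key is accessed. A minimal sketch; the token value here is a dummy:

```python
# Minimal sketch of the ${oc.env:...} resolver; the token is a dummy value.
import os
from omegaconf import OmegaConf

os.environ["COMET_API_TOKEN"] = "dummy-token"
cfg = OmegaConf.create({"api_key": "${oc.env:COMET_API_TOKEN}"})
print(cfg.api_key)  # dummy-token
```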
configs/logger/csv.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,7 +1,7 @@

# csv logger built in lightning

csv:
  _target_: lightning.pytorch.loggers.csv_logs.CSVLogger
  save_dir: "${paths.output_dir}"
  name: "csv/"
  prefix: ""
configs/logger/many_loggers.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,9 +1,9 @@

# train with many loggers at once

defaults:
  # - comet
  - csv
  # - mlflow
  # - neptune
  - tensorboard
  - wandb
configs/logger/mlflow.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,12 +1,12 @@

# https://mlflow.org

mlflow:
  _target_: lightning.pytorch.loggers.mlflow.MLFlowLogger
  # experiment_name: ""
  # run_name: ""
  tracking_uri: ${paths.log_dir}/mlflow/mlruns # run `mlflow ui` command inside the `logs/mlflow/` dir to open the UI
  tags: null
  # save_dir: "./mlruns"
  prefix: ""
  artifact_location: null
  # run_id: ""
configs/logger/neptune.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,9 +1,9 @@

# https://neptune.ai

neptune:
  _target_: lightning.pytorch.loggers.neptune.NeptuneLogger
  api_key: ${oc.env:NEPTUNE_API_TOKEN} # api key is loaded from environment variable
  project: username/lightning-hydra-template
  # name: ""
  log_model_checkpoints: True
  prefix: ""
configs/logger/tensorboard.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,10 +1,10 @@

# https://www.tensorflow.org/tensorboard/

tensorboard:
  _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
  save_dir: "${paths.output_dir}/tensorboard/"
  name: null
  log_graph: False
  default_hp_metric: True
  prefix: ""
  # version: ""
configs/logger/wandb.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,16 +1,16 @@

# https://wandb.ai

wandb:
  _target_: lightning.pytorch.loggers.wandb.WandbLogger
  # name: "" # name of the run (normally generated by wandb)
  save_dir: "${paths.output_dir}"
  offline: False
  id: null # pass correct id to resume experiment!
  anonymous: null # enable anonymous logging
  project: "lightning-hydra-template"
  log_model: False # upload lightning ckpts
  prefix: "" # a string to put at the beginning of metric keys
  # entity: "" # set to name of your wandb team
  group: ""
  tags: []
  job_type: ""
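The hrcwhu experiment configs above override `logger.wandb.project` and `logger.wandb.name` on top of this node, and Hydra instantiates it into a `WandbLogger` at runtime. A sketch with `offline: True` so it runs without a wandb account (requires the `wandb` and `lightning` packages):

```python
# Sketch: instantiating the wandb logger node. offline=True keeps the
# example runnable without a wandb account; other keys mirror the config
# after the hrcwhu experiment overrides.
from hydra.utils import instantiate
from omegaconf import OmegaConf

wandb_cfg = OmegaConf.create({
    "_target_": "lightning.pytorch.loggers.wandb.WandbLogger",
    "project": "hrcWhu",  # value the hrcwhu experiments override in
    "name": "unet",
    "offline": True,
})
logger = instantiate(wandb_cfg)
```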
configs/model/cdnetv1/README.md
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,117 +1,117 @@

# CDnet: CNN-Based Cloud Detection for Remote Sensing Imagery

> [CDnet: CNN-Based Cloud Detection for Remote Sensing Imagery](https://ieeexplore.ieee.org/document/8681238)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://ieeexplore.ieee.org/document/8681238">Official Repo</a>

<a href="https://github.com/nkszjx/CDnetV2-pytorch-master/tree/main">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Cloud detection is one of the important tasks for remote sensing image (RSI) preprocessing. In this paper, we utilize the thumbnail (i.e., preview image) of RSI, which contains the information of the original multispectral or panchromatic imagery, to extract the cloud mask efficiently. Compared with detecting cloud masks from original RSI, it is more challenging to detect cloud masks using thumbnails due to the loss of resolution and spectrum information. To tackle this problem, we propose a cloud detection neural network (CDnet) with an encoder–decoder structure, a feature pyramid module (FPM), and a boundary refinement (BR) block. The FPM extracts the multiscale contextual information without the loss of resolution and coverage; the BR block refines object boundaries; and the encoder–decoder structure gradually recovers segmentation results with the same size as the input image. Experimental results on the ZY-3 satellite thumbnails cloud cover validation data set and two other validation data sets (GF-1 WFV Cloud and Cloud Shadow Cover Validation Data and Landsat-8 Cloud Cover Assessment Validation Data) demonstrate that the proposed method achieves accurate detection results and outperforms several state-of-the-art methods.

<!-- [IMAGE] -->

<div align=center>
<img src="https://github.com/user-attachments/assets/502456ca-0f61-4616-99e7-baa93ee6d5e2" width="70%"/>
</div>

## Results and models

### Cloud Extraction Accuracy (%): FPM vs. ASPP+GAP

| Method          | OA    | MIoU  | Kappa | PA    | UA    |
| --------------- | ----- | ----- | ----- | ----- | ----- |
| CDnet(ASPP+GAP) | 95.41 | 89.38 | 82.05 | 87.82 | 89.85 |
| CDnet(FPM)      | 96.47 | 91.70 | 85.06 | 89.75 | 90.41 |

### Cloud Extraction Accuracy (%) for Modules and Variants of the CDnet

| Method       | OA    | MIoU  | Kappa | PA    | UA    |
| ------------ | ----- | ----- | ----- | ----- | ----- |
| ResNet50     | 91.13 | 82.83 | 73.38 | 81.99 | 80.34 |
| MRN\*        | 93.03 | 85.24 | 77.51 | 82.59 | 82.82 |
| MRN+FPM      | 93.89 | 88.50 | 81.82 | 87.10 | 85.51 |
| MRN+FPM+BR   | 94.31 | 88.97 | 82.59 | 87.12 | 87.04 |
| CDnet-FPM    | 93.14 | 88.14 | 80.44 | 87.64 | 84.46 |
| CDnet-BR     | 95.04 | 89.63 | 83.78 | 87.36 | 88.67 |
| CDnet-FPM-BR | 93.10 | 87.91 | 80.01 | 87.01 | 83.84 |
| CDnet-A      | 94.84 | 89.41 | 82.91 | 87.32 | 88.07 |
| CDnet-B      | 95.27 | 90.51 | 84.01 | 88.97 | 89.71 |
| CDnet-C      | 96.09 | 90.73 | 84.27 | 88.74 | 90.28 |
| CDnet        | 96.47 | 91.70 | 85.06 | 89.75 | 90.41 |

\*MRN stands for the modified ResNet-50.

### Cloud Extraction Accuracy (%) Compared with Other Methods

| Method     | OA    | MIoU  | Kappa | PA    | UA    |
| ---------- | ----- | ----- | ----- | ----- | ----- |
| Maxlike    | 77.73 | 66.16 | 53.55 | 91.30 | 54.98 |
| SVM        | 78.21 | 66.79 | 54.87 | 91.77 | 56.37 |
| L-unet     | 86.51 | 73.67 | 63.79 | 83.15 | 64.79 |
| FCN-8      | 90.53 | 81.08 | 68.08 | 82.91 | 78.87 |
| MVGG-16    | 92.73 | 86.65 | 78.94 | 88.12 | 81.84 |
| DPN        | 93.11 | 86.73 | 79.05 | 87.68 | 83.96 |
| DeeplabV2  | 93.36 | 87.56 | 79.12 | 87.50 | 84.65 |
| PSPnet     | 94.24 | 88.37 | 81.41 | 86.67 | 89.17 |
| DeeplabV3  | 95.03 | 88.74 | 81.53 | 87.63 | 89.72 |
| DeeplabV3+ | 96.01 | 90.45 | 83.92 | 88.47 | 90.03 |
| CDnet      | 96.47 | 91.70 | 85.06 | 89.75 | 90.41 |

### Cloud Extraction Accuracy (%) of GF-1 Satellite Imagery

| Method     | OA    | MIoU  | Kappa | PA    | UA    |
| ---------- | ----- | ----- | ----- | ----- | ----- |
| MFC        | 92.36 | 80.32 | 74.64 | 83.58 | 75.32 |
| L-unet     | 92.44 | 82.39 | 76.26 | 87.61 | 74.98 |
| FCN-8      | 92.61 | 82.71 | 76.45 | 87.45 | 75.61 |
| MVGG-16    | 93.07 | 86.17 | 77.13 | 87.68 | 79.50 |
| DPN        | 93.19 | 86.32 | 77.25 | 86.85 | 80.93 |
| DeeplabV2  | 95.07 | 87.00 | 80.07 | 86.60 | 82.18 |
| PSPnet     | 95.30 | 87.45 | 80.74 | 85.87 | 83.27 |
| DeeplabV3  | 95.95 | 88.13 | 81.05 | 86.36 | 88.72 |
| DeeplabV3+ | 96.18 | 89.11 | 82.31 | 87.37 | 89.05 |
| CDnet      | 96.73 | 89.83 | 83.23 | 87.94 | 89.60 |

### Cloud Extraction Accuracy (%) of Landsat-8 Satellite Imagery

| Method    | OA    | MIoU  | Kappa | PA    | UA    |
| --------- | ----- | ----- | ----- | ----- | ----- |
| Fmask     | 85.21 | 71.52 | 63.01 | 86.24 | 70.38 |
| L-unet    | 90.56 | 77.95 | 68.79 | 79.32 | 78.94 |
| FCN-8     | 90.88 | 78.84 | 71.32 | 76.28 | 82.31 |
| MVGG-16   | 93.31 | 81.59 | 77.08 | 77.29 | 83.00 |
| DPN       | 93.40 | 86.34 | 81.52 | 84.61 | 89.93 |
| DeeplabV2 | 94.11 | 86.90 | 81.63 | 84.93 | 89.87 |
| PSPnet    | 95.43 | 88.29 | 83.12 | 86.98 | 90.59 |
| DeeplabV3 | 96.38 | 90.32 | 84.31 | 89.52 | 91.92 |
| CDnet     | 97.16 | 90.84 | 84.91 | 90.15 | 92.08 |

## Citation

```bibtex
@ARTICLE{8681238,
  author={J. {Yang} and J. {Guo} and H. {Yue} and Z. {Liu} and H. {Hu} and K. {Li}},
  journal={IEEE Transactions on Geoscience and Remote Sensing},
  title={CDnet: CNN-Based Cloud Detection for Remote Sensing Imagery},
  year={2019},
  volume={57},
  number={8},
  pages={6195-6211},
  doi={10.1109/TGRS.2019.2904868}
}
```
configs/model/cdnetv1/cdnetv1.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,19 +1,19 @@

_target_: src.models.base_module.BaseLitModule
num_classes: 2

net:
  _target_: src.models.components.cdnetv1.CDnetV1
  num_classes: 2

criterion:
  _target_: torch.nn.CrossEntropyLoss

optimizer:
  _target_: torch.optim.SGD
  _partial_: true
  lr: 0.0001

scheduler: null

# compile model for faster training with pytorch 2.0
compile: false
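`_partial_: true` makes Hydra return a `functools.partial` instead of calling the target, so the Lightning module can bind the model parameters later. A runnable sketch; a tiny `nn.Linear` stands in for CDnetV1:

```python
# Sketch of the _partial_ mechanism; nn.Linear stands in for CDnetV1.
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

opt_cfg = OmegaConf.create({"_target_": "torch.optim.SGD", "_partial_": True, "lr": 0.0001})

model = torch.nn.Linear(4, 2)
make_optimizer = instantiate(opt_cfg)           # functools.partial(SGD, lr=1e-4)
optimizer = make_optimizer(model.parameters())  # params bound here, inside the LitModule
print(optimizer)
```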
configs/model/cdnetv2/README.md
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,90 +1,90 @@

# CDnetV2: CNN-Based Cloud Detection for Remote Sensing Imagery With Cloud-Snow Coexistence

> [CDnetV2: CNN-Based Cloud Detection for Remote Sensing Imagery With Cloud-Snow Coexistence](https://ieeexplore.ieee.org/document/9094671)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://ieeexplore.ieee.org/document/9094671">Official Repo</a>

<a href="https://github.com/nkszjx/CDnetV2-pytorch-master">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Cloud detection is a crucial preprocessing step for optical satellite remote sensing (RS) images. This article focuses on cloud detection for RS imagery with cloud-snow coexistence and on the utilization of satellite thumbnails, which lose a considerable amount of the high-resolution and spectrum information of the original RS images, to extract cloud masks efficiently. To tackle this problem, we propose a novel cloud detection neural network with an encoder-decoder structure, named CDnetV2, as a continuation of our cloud detection series. Compared with our previous CDnetV1, CDnetV2 contains two novel modules, that is, the adaptive feature fusing model (AFFM) and high-level semantic information guidance flows (HSIGFs). AFFM fuses multilevel feature maps via three submodules: a channel attention fusion model (CAFM), a spatial attention fusion model (SAFM), and a channel attention refinement model (CARM). HSIGFs are designed to make the feature layers at the decoder of CDnetV2 aware of the locations of cloud objects. The high-level semantic information of HSIGFs is extracted by a proposed high-level feature fusing model (HFFM). Equipped with these two key modules, AFFM and HSIGFs, CDnetV2 is able to fully utilize features extracted from encoder layers and yield accurate cloud detection results. Experimental results on the ZY-3 satellite thumbnail data set demonstrate that the proposed CDnetV2 achieves accurate detection results and outperforms several state-of-the-art methods.

<!-- [IMAGE] -->

<div align=center>
<img src="https://github.com/user-attachments/assets/8e213e6f-3c7a-45f1-b9fc-de7fe6c92489" width="70%"/>
</div>

## Results and models

### Cloud Extraction Accuracy (%) of Different CNN-Based Methods on ZY-3 Satellite Thumbnails

| Method     | OA    | MIoU  | Kappa | PA    | UA    |
| ---------- | ----- | ----- | ----- | ----- | ----- |
| MSegNet    | 90.86 | 81.20 | 75.57 | 73.78 | 86.13 |
| MUnet      | 91.62 | 82.51 | 76.70 | 74.44 | 87.39 |
| PSPnet     | 90.58 | 81.63 | 75.36 | 76.02 | 87.52 |
| DeeplabV3+ | 91.80 | 82.62 | 77.65 | 75.30 | 87.76 |
| CDnetV1    | 93.15 | 82.80 | 79.21 | 82.37 | 86.72 |
| CDnetV2    | 95.76 | 86.62 | 82.51 | 87.75 | 88.58 |

### Statistical Results of Cloudage Estimation Error in Terms of the MAD and Its Variance

| Methods    | Mean value ($\mu$) | Variance ($\sigma^2$) |
| ---------- | ------------------ | --------------------- |
| CDnetV2    | 0.0241             | 0.0220                |
| CDnetV1    | 0.0357             | 0.0288                |
| DeeplabV3+ | 0.0456             | 0.0301                |
| PSPnet     | 0.0487             | 0.0380                |
| MUnet      | 0.0544             | 0.0583                |
| MSegNet    | 0.0572             | 0.0591                |

### Computational Complexity Analysis of Different CNN-Based Methods

| Methods    | GFLOPs (224×224) | Trainable params | Running time (s, 1k×1k) |
| ---------- | ---------------- | ---------------- | ----------------------- |
| CDnetV2    | 31.5             | 65.9 M           | 1.31                    |
| CDnetV1    | 48.5             | 64.8 M           | 1.26                    |
| DeeplabV3+ | 31.8             | 40.3 M           | 1.14                    |
| PSPnet     | 19.3             | 46.6 M           | 1.05                    |
| MUnet      | 25.2             | 8.6 M            | 1.09                    |
| MSegNet    | 90.2             | 29.7 M           | 1.28                    |

## Citation

```bibtex
@ARTICLE{8681238,
  author={J. {Yang} and J. {Guo} and H. {Yue} and Z. {Liu} and H. {Hu} and K. {Li}},
  journal={IEEE Transactions on Geoscience and Remote Sensing},
  title={CDnet: CNN-Based Cloud Detection for Remote Sensing Imagery},
  year={2019},
  volume={57},
  number={8},
  pages={6195-6211},
  doi={10.1109/TGRS.2019.2904868}
}

@ARTICLE{9094671,
  author={J. {Guo} and J. {Yang} and H. {Yue} and H. {Tan} and C. {Hou} and K. {Li}},
  journal={IEEE Transactions on Geoscience and Remote Sensing},
  title={CDnetV2: CNN-Based Cloud Detection for Remote Sensing Imagery With Cloud-Snow Coexistence},
  year={2021},
  volume={59},
  number={1},
  pages={700-713},
  doi={10.1109/TGRS.2020.2991398}
}
```
configs/model/cdnetv2/cdnetv2.yaml
CHANGED (old and new text identical; line-ending changes only, so the file is shown once)
@@ -1,19 +1,19 @@

_target_: src.models.cdnetv2_module.CDNetv2LitModule

net:
  _target_: src.models.components.cdnetv2.CDnetV2
  num_classes: 2

num_classes: 2

criterion:
  _target_: src.loss.cdnetv2_loss.CDnetv2Loss
  loss_fn:
    _target_: torch.nn.CrossEntropyLoss

optimizer:
  _target_: torch.optim.SGD
  _partial_: true
  lr: 0.0001

scheduler: null
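The `criterion` node here nests one `_target_` inside another: `CDnetv2Loss` receives an instantiated `torch.nn.CrossEntropyLoss` as its `loss_fn` argument. A hedged sketch of that wrapper shape; the real `src.loss.cdnetv2_loss.CDnetv2Loss` is repo code not shown in this diff, and the two-output auxiliary weighting below is an illustrative assumption, not its actual definition:

```python
# Hedged sketch of the wrapper pattern the criterion node implies: a loss
# module that owns an inner loss_fn. The (main, aux) output split and the
# 0.4 weighting are assumptions for illustration only.
import torch
from torch import nn

class AuxLossSketch(nn.Module):
    def __init__(self, loss_fn: nn.Module, aux_weight: float = 0.4):
        super().__init__()
        self.loss_fn = loss_fn        # mirrors the nested loss_fn config key
        self.aux_weight = aux_weight

    def forward(self, outputs, target):
        main_out, aux_out = outputs   # assumes the net returns (main, aux) logits
        return self.loss_fn(main_out, target) + self.aux_weight * self.loss_fn(aux_out, target)

criterion = AuxLossSketch(nn.CrossEntropyLoss())
logits = (torch.randn(2, 2, 8, 8), torch.randn(2, 2, 8, 8))
target = torch.randint(0, 2, (2, 8, 8))
print(criterion(logits, target))
```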