radames committed
Commit a0bcaae · 0 Parent(s)
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +3 -0
  2. .gitignore +156 -0
  3. DragGAN.gif +3 -0
  4. LICENSE.txt +97 -0
  5. README.md +73 -0
  6. arial.ttf +0 -0
  7. dnnlib/__init__.py +9 -0
  8. dnnlib/util.py +491 -0
  9. environment.yml +24 -0
  10. gen_images.py +149 -0
  11. gradio_utils/__init__.py +9 -0
  12. gradio_utils/utils.py +154 -0
  13. gui_utils/__init__.py +9 -0
  14. gui_utils/gl_utils.py +416 -0
  15. gui_utils/glfw_window.py +229 -0
  16. gui_utils/imgui_utils.py +191 -0
  17. gui_utils/imgui_window.py +103 -0
  18. gui_utils/text_utils.py +123 -0
  19. legacy.py +323 -0
  20. requirements.txt +20 -0
  21. scripts/download_model.sh +19 -0
  22. scripts/gui.sh +11 -0
  23. stylegan_human/.gitignore +10 -0
  24. stylegan_human/PP_HumanSeg/deploy/infer.py +180 -0
  25. stylegan_human/PP_HumanSeg/export_model/download_export_model.py +44 -0
  26. stylegan_human/PP_HumanSeg/pretrained_model/download_pretrained_model.py +44 -0
  27. stylegan_human/README.md +229 -0
  28. stylegan_human/__init__.py +0 -0
  29. stylegan_human/alignment.py +223 -0
  30. stylegan_human/bg_white.py +57 -0
  31. stylegan_human/dnnlib/__init__.py +11 -0
  32. stylegan_human/dnnlib/tflib/__init__.py +20 -0
  33. stylegan_human/dnnlib/tflib/autosummary.py +193 -0
  34. stylegan_human/dnnlib/tflib/custom_ops.py +171 -0
  35. stylegan_human/dnnlib/tflib/network.py +592 -0
  36. stylegan_human/dnnlib/tflib/ops/__init__.py +9 -0
  37. stylegan_human/dnnlib/tflib/ops/fused_bias_act.cu +190 -0
  38. stylegan_human/dnnlib/tflib/ops/fused_bias_act.py +198 -0
  39. stylegan_human/dnnlib/tflib/ops/upfirdn_2d.cu +328 -0
  40. stylegan_human/dnnlib/tflib/ops/upfirdn_2d.py +366 -0
  41. stylegan_human/dnnlib/tflib/optimizer.py +338 -0
  42. stylegan_human/dnnlib/tflib/tfutil.py +254 -0
  43. stylegan_human/dnnlib/util.py +479 -0
  44. stylegan_human/docs/Dataset.md +74 -0
  45. stylegan_human/docs/SHHQ_Dataset_Release_Agreement.pdf +0 -0
  46. stylegan_human/edit.py +194 -0
  47. stylegan_human/edit/__init__.py +3 -0
  48. stylegan_human/edit/edit_config.py +16 -0
  49. stylegan_human/edit/edit_helper.py +215 -0
  50. stylegan_human/environment.yml +30 -0
.gitattributes ADDED
@@ -0,0 +1,3 @@
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,156 @@
+ # Created by .ignore support plugin (hsz.mobi)
+ ### Python template
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *,cover
+ .hypothesis/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # IPython Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # dotenv
+ .env
+
+ # virtualenv
+ venv/
+ ENV/
+
+ # Spyder project settings
+ .spyderproject
+
+ # Rope project settings
+ .ropeproject
+ ### VirtualEnv template
+ # Virtualenv
+ # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
+ .Python
+ [Bb]in
+ [Ii]nclude
+ [Ll]ib
+ [Ll]ib64
+ [Ll]ocal
+ [Ss]cripts
+ pyvenv.cfg
+ .venv
+ pip-selfcheck.json
+ ### JetBrains template
+ # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+ # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+ # User-specific stuff:
+ .idea/workspace.xml
+ .idea/tasks.xml
+ .idea/dictionaries
+ .idea/vcs.xml
+ .idea/jsLibraryMappings.xml
+
+ # Sensitive or high-churn files:
+ .idea/dataSources.ids
+ .idea/dataSources.xml
+ .idea/dataSources.local.xml
+ .idea/sqlDataSources.xml
+ .idea/dynamic.xml
+ .idea/uiDesigner.xml
+
+ # Gradle:
+ .idea/gradle.xml
+ .idea/libraries
+
+ # Mongo Explorer plugin:
+ .idea/mongoSettings.xml
+
+ .idea/
+
+ ## File-based project format:
+ *.iws
+
+ ## Plugin-specific files:
+
+ # IntelliJ
+ /out/
+
+ # mpeltonen/sbt-idea plugin
+ .idea_modules/
+
+ # JIRA plugin
+ atlassian-ide-plugin.xml
+
+ # Crashlytics plugin (for Android Studio and IntelliJ)
+ com_crashlytics_export_strings.xml
+ crashlytics.properties
+ crashlytics-build.properties
+ fabric.properties
+
+ # Mac related
+ .DS_Store
+
+ checkpoints
DragGAN.gif ADDED

Git LFS Details

  • SHA256: 2eab11d4dd1f11c2efacfcde385899b0164e241a7823eb050ab2e021f337225a
  • Pointer size: 133 Bytes
  • Size of remote file: 21.6 MB
LICENSE.txt ADDED
@@ -0,0 +1,97 @@
+ Copyright (c) 2021, NVIDIA Corporation & affiliates. All rights reserved.
+
+
+ NVIDIA Source Code License for StyleGAN3
+
+
+ =======================================================================
+
+ 1. Definitions
+
+ "Licensor" means any person or entity that distributes its Work.
+
+ "Software" means the original work of authorship made available under
+ this License.
+
+ "Work" means the Software and any additions to or derivative works of
+ the Software that are made available under this License.
+
+ The terms "reproduce," "reproduction," "derivative works," and
+ "distribution" have the meaning as provided under U.S. copyright law;
+ provided, however, that for the purposes of this License, derivative
+ works shall not include works that remain separable from, or merely
+ link (or bind by name) to the interfaces of, the Work.
+
+ Works, including the Software, are "made available" under this License
+ by including in or with the Work either (a) a copyright notice
+ referencing the applicability of this License to the Work, or (b) a
+ copy of this License.
+
+ 2. License Grants
+
+ 2.1 Copyright Grant. Subject to the terms and conditions of this
+ License, each Licensor grants to you a perpetual, worldwide,
+ non-exclusive, royalty-free, copyright license to reproduce,
+ prepare derivative works of, publicly display, publicly perform,
+ sublicense and distribute its Work and any resulting derivative
+ works in any form.
+
+ 3. Limitations
+
+ 3.1 Redistribution. You may reproduce or distribute the Work only
+ if (a) you do so under this License, (b) you include a complete
+ copy of this License with your distribution, and (c) you retain
+ without modification any copyright, patent, trademark, or
+ attribution notices that are present in the Work.
+
+ 3.2 Derivative Works. You may specify that additional or different
+ terms apply to the use, reproduction, and distribution of your
+ derivative works of the Work ("Your Terms") only if (a) Your Terms
+ provide that the use limitation in Section 3.3 applies to your
+ derivative works, and (b) you identify the specific derivative
+ works that are subject to Your Terms. Notwithstanding Your Terms,
+ this License (including the redistribution requirements in Section
+ 3.1) will continue to apply to the Work itself.
+
+ 3.3 Use Limitation. The Work and any derivative works thereof only
+ may be used or intended for use non-commercially. Notwithstanding
+ the foregoing, NVIDIA and its affiliates may use the Work and any
+ derivative works commercially. As used herein, "non-commercially"
+ means for research or evaluation purposes only.
+
+ 3.4 Patent Claims. If you bring or threaten to bring a patent claim
+ against any Licensor (including any claim, cross-claim or
+ counterclaim in a lawsuit) to enforce any patents that you allege
+ are infringed by any Work, then your rights under this License from
+ such Licensor (including the grant in Section 2.1) will terminate
+ immediately.
+
+ 3.5 Trademarks. This License does not grant any rights to use any
+ Licensor’s or its affiliates’ names, logos, or trademarks, except
+ as necessary to reproduce the notices described in this License.
+
+ 3.6 Termination. If you violate any term of this License, then your
+ rights under this License (including the grant in Section 2.1) will
+ terminate immediately.
+
+ 4. Disclaimer of Warranty.
+
+ THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
+ NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
+ THIS LICENSE.
+
+ 5. Limitation of Liability.
+
+ EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
+ THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
+ SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
+ INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
+ OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
+ (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
+ LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
+ COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGES.
+
+ =======================================================================
README.md ADDED
@@ -0,0 +1,73 @@
+ ---
+ title: DragGan
+ emoji: 👀
+ colorFrom: purple
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: visualizer_drag_gradio.py
+ pinned: false
+ ---
+
+
+ # Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold
+
+ <p align="center">
+ <img src="DragGAN.gif" width="700">
+ </p>
+
+ **Figure:** *Drag your GAN.*
+
+ > **Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold** <br>
+ > Xingang Pan, Ayush Tewari, Thomas Leimkühler, Lingjie Liu, Abhimitra Meka, Christian Theobalt<br>
+ > *SIGGRAPH 2023 Conference Proceedings*
+
+ ## Requirements
+
+ Please follow the requirements of [https://github.com/NVlabs/stylegan3](https://github.com/NVlabs/stylegan3).
+
+ ## Download pre-trained StyleGAN2 weights
+
+ To download pre-trained weights, simply run:
+ ```sh
+ sh scripts/download_model.sh
+ ```
+ If you want to try StyleGAN-Human and the Landscapes HQ (LHQ) dataset, please download the weights from these links: [StyleGAN-Human](https://drive.google.com/file/d/1dlFEHbu-WzQWJl7nBBZYcTyo000H9hVm/view?usp=sharing), [LHQ](https://drive.google.com/file/d/16twEf0T9QINAEoMsWefoWiyhcTd-aiWc/view?usp=sharing), and put them under `./checkpoints`.
+
+ Feel free to try other pretrained StyleGAN models.
+
+ ## Run DragGAN GUI
+
+ To start the DragGAN GUI, simply run:
+ ```sh
+ sh scripts/gui.sh
+ ```
+
+ This GUI supports editing GAN-generated images. To edit a real image, you need to first perform GAN inversion using tools like [PTI](https://github.com/danielroich/PTI), then load the new latent code and model weights into the GUI.
+
+ You can also run the DragGAN Gradio demo:
+ ```sh
+ python visualizer_drag_gradio.py
+ ```
+
+ ## Acknowledgement
+
+ This code is developed based on [StyleGAN3](https://github.com/NVlabs/stylegan3). Part of the code is borrowed from [StyleGAN-Human](https://github.com/stylegan-human/StyleGAN-Human).
+
+ ## License
+
+ The code related to the DragGAN algorithm is licensed under [CC-BY-NC](https://creativecommons.org/licenses/by-nc/4.0/).
+ However, most of this project is available under separate license terms: all code used or modified from [StyleGAN3](https://github.com/NVlabs/stylegan3) falls under the [NVIDIA Source Code License](https://github.com/NVlabs/stylegan3/blob/main/LICENSE.txt).
+
+ Any use or derivative of this code must preserve the watermarking functionality.
+
+ ## BibTeX
+
+ ```bibtex
+ @inproceedings{pan2023draggan,
+     title={Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold},
+     author={Pan, Xingang and Tewari, Ayush and Leimk{\"u}hler, Thomas and Liu, Lingjie and Meka, Abhimitra and Theobalt, Christian},
+     booktitle={ACM SIGGRAPH 2023 Conference Proceedings},
+     year={2023}
+ }
+ ```
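For orientation, the weights fetched above load through the same helpers this commit ships (`dnnlib.util.open_url` plus `legacy.load_network_pkl`, exactly as `gen_images.py` does). A minimal loading sketch; the checkpoint filename is a hypothetical placeholder:

```python
# Loading sketch mirroring gen_images.py; the path below is a hypothetical
# placeholder for whatever scripts/download_model.sh actually fetched.
import torch
import dnnlib
import legacy

network_pkl = './checkpoints/example_stylegan2.pkl'  # hypothetical filename
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with dnnlib.util.open_url(network_pkl) as f:
    G = legacy.load_network_pkl(f)['G_ema'].to(device)  # EMA generator weights

z = torch.randn([1, G.z_dim], device=device)      # random latent code
label = torch.zeros([1, G.c_dim], device=device)  # empty label for unconditional models
img = G(z, label, truncation_psi=0.7)             # NCHW float tensor, range [-1, 1]
```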
arial.ttf ADDED
Binary file (276 kB).
 
dnnlib/__init__.py ADDED
@@ -0,0 +1,9 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ from .util import EasyDict, make_cache_dir_path
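`EasyDict`, re-exported here, is the one convenience type most of this codebase leans on: a `dict` whose keys double as attributes (the class itself is defined in `dnnlib/util.py` below). A quick illustration:

```python
from dnnlib import EasyDict

cfg = EasyDict(width=512, height=512)
cfg.channels = 3            # __setattr__ writes straight into the dict
assert cfg['channels'] == 3
del cfg.width               # __delattr__ removes the key
print(cfg)                  # {'height': 512, 'channels': 3}
```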
dnnlib/util.py ADDED
@@ -0,0 +1,491 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ """Miscellaneous utility classes and functions."""
+
+ import ctypes
+ import fnmatch
+ import importlib
+ import inspect
+ import numpy as np
+ import os
+ import shutil
+ import sys
+ import types
+ import io
+ import pickle
+ import re
+ import requests
+ import html
+ import hashlib
+ import glob
+ import tempfile
+ import urllib
+ import urllib.request
+ import uuid
+
+ from distutils.util import strtobool
+ from typing import Any, List, Tuple, Union
+
+
+ # Util classes
+ # ------------------------------------------------------------------------------------------
+
+
+ class EasyDict(dict):
+     """Convenience class that behaves like a dict but allows access with the attribute syntax."""
+
+     def __getattr__(self, name: str) -> Any:
+         try:
+             return self[name]
+         except KeyError:
+             raise AttributeError(name)
+
+     def __setattr__(self, name: str, value: Any) -> None:
+         self[name] = value
+
+     def __delattr__(self, name: str) -> None:
+         del self[name]
+
+
+ class Logger(object):
+     """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
+
+     def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
+         self.file = None
+
+         if file_name is not None:
+             self.file = open(file_name, file_mode)
+
+         self.should_flush = should_flush
+         self.stdout = sys.stdout
+         self.stderr = sys.stderr
+
+         sys.stdout = self
+         sys.stderr = self
+
+     def __enter__(self) -> "Logger":
+         return self
+
+     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+         self.close()
+
+     def write(self, text: Union[str, bytes]) -> None:
+         """Write text to stdout (and a file) and optionally flush."""
+         if isinstance(text, bytes):
+             text = text.decode()
+         if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
+             return
+
+         if self.file is not None:
+             self.file.write(text)
+
+         self.stdout.write(text)
+
+         if self.should_flush:
+             self.flush()
+
+     def flush(self) -> None:
+         """Flush written text to both stdout and a file, if open."""
+         if self.file is not None:
+             self.file.flush()
+
+         self.stdout.flush()
+
+     def close(self) -> None:
+         """Flush, close possible files, and remove stdout/stderr mirroring."""
+         self.flush()
+
+         # if using multiple loggers, prevent closing in wrong order
+         if sys.stdout is self:
+             sys.stdout = self.stdout
+         if sys.stderr is self:
+             sys.stderr = self.stderr
+
+         if self.file is not None:
+             self.file.close()
+             self.file = None
+
+
+ # Cache directories
+ # ------------------------------------------------------------------------------------------
+
+ _dnnlib_cache_dir = None
+
+ def set_cache_dir(path: str) -> None:
+     global _dnnlib_cache_dir
+     _dnnlib_cache_dir = path
+
+ def make_cache_dir_path(*paths: str) -> str:
+     if _dnnlib_cache_dir is not None:
+         return os.path.join(_dnnlib_cache_dir, *paths)
+     if 'DNNLIB_CACHE_DIR' in os.environ:
+         return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
+     if 'HOME' in os.environ:
+         return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
+     if 'USERPROFILE' in os.environ:
+         return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
+     return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
+
+ # Small util functions
+ # ------------------------------------------------------------------------------------------
+
+
+ def format_time(seconds: Union[int, float]) -> str:
+     """Convert the seconds to human readable string with days, hours, minutes and seconds."""
+     s = int(np.rint(seconds))
+
+     if s < 60:
+         return "{0}s".format(s)
+     elif s < 60 * 60:
+         return "{0}m {1:02}s".format(s // 60, s % 60)
+     elif s < 24 * 60 * 60:
+         return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
+     else:
+         return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
+
+
+ def format_time_brief(seconds: Union[int, float]) -> str:
+     """Convert the seconds to human readable string with days, hours, minutes and seconds."""
+     s = int(np.rint(seconds))
+
+     if s < 60:
+         return "{0}s".format(s)
+     elif s < 60 * 60:
+         return "{0}m {1:02}s".format(s // 60, s % 60)
+     elif s < 24 * 60 * 60:
+         return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60)
+     else:
+         return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24)
+
+
+ def ask_yes_no(question: str) -> bool:
+     """Ask the user the question until the user inputs a valid answer."""
+     while True:
+         try:
+             print("{0} [y/n]".format(question))
+             return strtobool(input().lower())
+         except ValueError:
+             pass
+
+
+ def tuple_product(t: Tuple) -> Any:
+     """Calculate the product of the tuple elements."""
+     result = 1
+
+     for v in t:
+         result *= v
+
+     return result
+
+
+ _str_to_ctype = {
+     "uint8": ctypes.c_ubyte,
+     "uint16": ctypes.c_uint16,
+     "uint32": ctypes.c_uint32,
+     "uint64": ctypes.c_uint64,
+     "int8": ctypes.c_byte,
+     "int16": ctypes.c_int16,
+     "int32": ctypes.c_int32,
+     "int64": ctypes.c_int64,
+     "float32": ctypes.c_float,
+     "float64": ctypes.c_double
+ }
+
+
+ def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
+     """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
+     type_str = None
+
+     if isinstance(type_obj, str):
+         type_str = type_obj
+     elif hasattr(type_obj, "__name__"):
+         type_str = type_obj.__name__
+     elif hasattr(type_obj, "name"):
+         type_str = type_obj.name
+     else:
+         raise RuntimeError("Cannot infer type name from input")
+
+     assert type_str in _str_to_ctype.keys()
+
+     my_dtype = np.dtype(type_str)
+     my_ctype = _str_to_ctype[type_str]
+
+     assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
+
+     return my_dtype, my_ctype
+
+
+ def is_pickleable(obj: Any) -> bool:
+     try:
+         with io.BytesIO() as stream:
+             pickle.dump(obj, stream)
+         return True
+     except:
+         return False
+
+
+ # Functionality to import modules/objects by name, and call functions by name
+ # ------------------------------------------------------------------------------------------
+
+ def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
+     """Searches for the underlying module behind the name to some python object.
+     Returns the module and the object name (original name with module part removed)."""
+
+     # allow convenience shorthands, substitute them by full names
+     obj_name = re.sub("^np.", "numpy.", obj_name)
+     obj_name = re.sub("^tf.", "tensorflow.", obj_name)
+
+     # list alternatives for (module_name, local_obj_name)
+     parts = obj_name.split(".")
+     name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
+
+     # try each alternative in turn
+     for module_name, local_obj_name in name_pairs:
+         try:
+             module = importlib.import_module(module_name) # may raise ImportError
+             get_obj_from_module(module, local_obj_name) # may raise AttributeError
+             return module, local_obj_name
+         except:
+             pass
+
+     # maybe some of the modules themselves contain errors?
+     for module_name, _local_obj_name in name_pairs:
+         try:
+             importlib.import_module(module_name) # may raise ImportError
+         except ImportError:
+             if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
+                 raise
+
+     # maybe the requested attribute is missing?
+     for module_name, local_obj_name in name_pairs:
+         try:
+             module = importlib.import_module(module_name) # may raise ImportError
+             get_obj_from_module(module, local_obj_name) # may raise AttributeError
+         except ImportError:
+             pass
+
+     # we are out of luck, but we have no idea why
+     raise ImportError(obj_name)
+
+
+ def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
+     """Traverses the object name and returns the last (rightmost) python object."""
+     if obj_name == '':
+         return module
+     obj = module
+     for part in obj_name.split("."):
+         obj = getattr(obj, part)
+     return obj
+
+
+ def get_obj_by_name(name: str) -> Any:
+     """Finds the python object with the given name."""
+     module, obj_name = get_module_from_obj_name(name)
+     return get_obj_from_module(module, obj_name)
+
+
+ def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
+     """Finds the python object with the given name and calls it as a function."""
+     assert func_name is not None
+     func_obj = get_obj_by_name(func_name)
+     assert callable(func_obj)
+     return func_obj(*args, **kwargs)
+
+
+ def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
+     """Finds the python class with the given name and constructs it with the given arguments."""
+     return call_func_by_name(*args, func_name=class_name, **kwargs)
+
+
+ def get_module_dir_by_obj_name(obj_name: str) -> str:
+     """Get the directory path of the module containing the given object name."""
+     module, _ = get_module_from_obj_name(obj_name)
+     return os.path.dirname(inspect.getfile(module))
+
+
+ def is_top_level_function(obj: Any) -> bool:
+     """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
+     return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
+
+
+ def get_top_level_function_name(obj: Any) -> str:
+     """Return the fully-qualified name of a top-level function."""
+     assert is_top_level_function(obj)
+     module = obj.__module__
+     if module == '__main__':
+         module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
+     return module + "." + obj.__name__
+
+
+ # File system helpers
+ # ------------------------------------------------------------------------------------------
+
+ def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
+     """List all files recursively in a given directory while ignoring given file and directory names.
+     Returns list of tuples containing both absolute and relative paths."""
+     assert os.path.isdir(dir_path)
+     base_name = os.path.basename(os.path.normpath(dir_path))
+
+     if ignores is None:
+         ignores = []
+
+     result = []
+
+     for root, dirs, files in os.walk(dir_path, topdown=True):
+         for ignore_ in ignores:
+             dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
+
+             # dirs need to be edited in-place
+             for d in dirs_to_remove:
+                 dirs.remove(d)
+
+             files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
+
+         absolute_paths = [os.path.join(root, f) for f in files]
+         relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
+
+         if add_base_to_relative:
+             relative_paths = [os.path.join(base_name, p) for p in relative_paths]
+
+         assert len(absolute_paths) == len(relative_paths)
+         result += zip(absolute_paths, relative_paths)
+
+     return result
+
+
+ def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
+     """Takes in a list of tuples of (src, dst) paths and copies files.
+     Will create all necessary directories."""
+     for file in files:
+         target_dir_name = os.path.dirname(file[1])
+
+         # will create all intermediate-level directories
+         if not os.path.exists(target_dir_name):
+             os.makedirs(target_dir_name)
+
+         shutil.copyfile(file[0], file[1])
+
+
+ # URL helpers
+ # ------------------------------------------------------------------------------------------
+
+ def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
+     """Determine whether the given object is a valid URL string."""
+     if not isinstance(obj, str) or not "://" in obj:
+         return False
+     if allow_file_urls and obj.startswith('file://'):
+         return True
+     try:
+         res = requests.compat.urlparse(obj)
+         if not res.scheme or not res.netloc or not "." in res.netloc:
+             return False
+         res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
+         if not res.scheme or not res.netloc or not "." in res.netloc:
+             return False
+     except:
+         return False
+     return True
+
+
+ def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
+     """Download the given URL and return a binary-mode file object to access the data."""
+     assert num_attempts >= 1
+     assert not (return_filename and (not cache))
+
+     # Doesn't look like an URL scheme so interpret it as a local filename.
+     if not re.match('^[a-z]+://', url):
+         return url if return_filename else open(url, "rb")
+
+     # Handle file URLs. This code handles unusual file:// patterns that
+     # arise on Windows:
+     #
+     # file:///c:/foo.txt
+     #
+     # which would translate to a local '/c:/foo.txt' filename that's
+     # invalid. Drop the forward slash for such pathnames.
+     #
+     # If you touch this code path, you should test it on both Linux and
+     # Windows.
+     #
+     # Some internet resources suggest using urllib.request.url2pathname(),
+     # but that converts forward slashes to backslashes and this causes
+     # its own set of problems.
+     if url.startswith('file://'):
+         filename = urllib.parse.urlparse(url).path
+         if re.match(r'^/[a-zA-Z]:', filename):
+             filename = filename[1:]
+         return filename if return_filename else open(filename, "rb")
+
+     assert is_url(url)
+
+     # Lookup from cache.
+     if cache_dir is None:
+         cache_dir = make_cache_dir_path('downloads')
+
+     url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
+     if cache:
+         cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
+         if len(cache_files) == 1:
+             filename = cache_files[0]
+             return filename if return_filename else open(filename, "rb")
+
+     # Download.
+     url_name = None
+     url_data = None
+     with requests.Session() as session:
+         if verbose:
+             print("Downloading %s ..." % url, end="", flush=True)
+         for attempts_left in reversed(range(num_attempts)):
+             try:
+                 with session.get(url) as res:
+                     res.raise_for_status()
+                     if len(res.content) == 0:
+                         raise IOError("No data received")
+
+                     if len(res.content) < 8192:
+                         content_str = res.content.decode("utf-8")
+                         if "download_warning" in res.headers.get("Set-Cookie", ""):
+                             links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
+                             if len(links) == 1:
+                                 url = requests.compat.urljoin(url, links[0])
+                                 raise IOError("Google Drive virus checker nag")
+                         if "Google Drive - Quota exceeded" in content_str:
+                             raise IOError("Google Drive download quota exceeded -- please try again later")
+
+                     match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
+                     url_name = match[1] if match else url
+                     url_data = res.content
+                     if verbose:
+                         print(" done")
+                     break
+             except KeyboardInterrupt:
+                 raise
+             except:
+                 if not attempts_left:
+                     if verbose:
+                         print(" failed")
+                     raise
+                 if verbose:
+                     print(".", end="", flush=True)
+
+     # Save to cache.
+     if cache:
+         safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
+         cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
+         temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
+         os.makedirs(cache_dir, exist_ok=True)
+         with open(temp_file, "wb") as f:
+             f.write(url_data)
+         os.replace(temp_file, cache_file) # atomic
+         if return_filename:
+             return cache_file
+
+     # Return data as file object.
+     assert not return_filename
+     return io.BytesIO(url_data)
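The URL helpers at the bottom of this file are what `gen_images.py` and the GUI use to fetch checkpoints. A small sketch of their behavior, using illustrative paths that are not part of the commit:

```python
import dnnlib

# Anything without a 'scheme://' prefix is treated by open_url() as a plain
# local file rather than a download.
print(dnnlib.util.is_url('checkpoints/model.pkl'))          # False (illustrative path)
print(dnnlib.util.is_url('https://example.com/model.pkl'))  # True (illustrative URL)

# Downloads land in <cache>/downloads, keyed by the MD5 of the URL; the cache
# root falls back from set_cache_dir() to $DNNLIB_CACHE_DIR to ~/.cache/dnnlib.
print(dnnlib.util.make_cache_dir_path('downloads'))
```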
environment.yml ADDED
@@ -0,0 +1,24 @@
+ name: stylegan3
+ channels:
+   - pytorch
+   - nvidia
+ dependencies:
+   - python >= 3.8
+   - pip
+   - numpy>=1.20
+   - click>=8.0
+   - pillow=8.3.1
+   - scipy=1.7.1
+   - pytorch=1.9.1
+   - cudatoolkit=11.1
+   - requests=2.26.0
+   - tqdm=4.62.2
+   - ninja=1.10.2
+   - matplotlib=3.4.2
+   - imageio=2.9.0
+   - pip:
+     - imgui==1.3.0
+     - glfw==2.2.0
+     - pyopengl==3.1.5
+     - imageio-ffmpeg==0.4.3
+     - pyspng
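This is the stock StyleGAN3 conda environment; it can be instantiated with `conda env create -f environment.yml` and activated with `conda activate stylegan3`.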
gen_images.py ADDED
@@ -0,0 +1,149 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ """Generate images using pretrained network pickle."""
+
+ import os
+ import re
+ from typing import List, Optional, Tuple, Union
+
+ import click
+ import dnnlib
+ import numpy as np
+ import PIL.Image
+ import torch
+
+ import legacy
+
+ #----------------------------------------------------------------------------
+
+ def parse_range(s: Union[str, List]) -> List[int]:
+     '''Parse a comma separated list of numbers or ranges and return a list of ints.
+
+     Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
+     '''
+     if isinstance(s, list): return s
+     ranges = []
+     range_re = re.compile(r'^(\d+)-(\d+)$')
+     for p in s.split(','):
+         m = range_re.match(p)
+         if m:
+             ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
+         else:
+             ranges.append(int(p))
+     return ranges
+
+ #----------------------------------------------------------------------------
+
+ def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
+     '''Parse a floating point 2-vector of syntax 'a,b'.
+
+     Example:
+         '0,1' returns (0,1)
+     '''
+     if isinstance(s, tuple): return s
+     parts = s.split(',')
+     if len(parts) == 2:
+         return (float(parts[0]), float(parts[1]))
+     raise ValueError(f'cannot parse 2-vector {s}')
+
+ #----------------------------------------------------------------------------
+
+ def make_transform(translate: Tuple[float,float], angle: float):
+     m = np.eye(3)
+     s = np.sin(angle/360.0*np.pi*2)
+     c = np.cos(angle/360.0*np.pi*2)
+     m[0][0] = c
+     m[0][1] = s
+     m[0][2] = translate[0]
+     m[1][0] = -s
+     m[1][1] = c
+     m[1][2] = translate[1]
+     return m
+
+ #----------------------------------------------------------------------------
+
+ @click.command()
+ @click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
+ @click.option('--seeds', type=parse_range, help='List of random seeds (e.g., \'0,1,4-6\')', required=True)
+ @click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
+ @click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
+ @click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
+ @click.option('--translate', help='Translate XY-coordinate (e.g. \'0.3,1\')', type=parse_vec2, default='0,0', show_default=True, metavar='VEC2')
+ @click.option('--rotate', help='Rotation angle in degrees', type=float, default=0, show_default=True, metavar='ANGLE')
+ @click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
+ def generate_images(
+     network_pkl: str,
+     seeds: List[int],
+     truncation_psi: float,
+     noise_mode: str,
+     outdir: str,
+     translate: Tuple[float,float],
+     rotate: float,
+     class_idx: Optional[int]
+ ):
+     """Generate images using pretrained network pickle.
+
+     Examples:
+
+     \b
+     # Generate an image using pre-trained AFHQv2 model ("Ours" in Figure 1, left).
+     python gen_images.py --outdir=out --trunc=1 --seeds=2 \\
+         --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl
+
+     \b
+     # Generate uncurated images with truncation using the MetFaces-U dataset
+     python gen_images.py --outdir=out --trunc=0.7 --seeds=600-605 \\
+         --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl
+     """
+
+     print('Loading networks from "%s"...' % network_pkl)
+     device = torch.device('cuda')
+     with dnnlib.util.open_url(network_pkl) as f:
+         G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
+         # import pickle
+         # G = legacy.load_network_pkl(f)
+         # output = open('checkpoints/stylegan2-car-config-f-pt.pkl', 'wb')
+         # pickle.dump(G, output)
+
+     os.makedirs(outdir, exist_ok=True)
+
+     # Labels.
+     label = torch.zeros([1, G.c_dim], device=device)
+     if G.c_dim != 0:
+         if class_idx is None:
+             raise click.ClickException('Must specify class label with --class when using a conditional network')
+         label[:, class_idx] = 1
+     else:
+         if class_idx is not None:
+             print('warn: --class=lbl ignored when running on an unconditional network')
+
+     # Generate images.
+     for seed_idx, seed in enumerate(seeds):
+         print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
+         z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
+
+         # Construct an inverse rotation/translation matrix and pass to the generator. The
+         # generator expects this matrix as an inverse to avoid potentially failing numerical
+         # operations in the network.
+         if hasattr(G.synthesis, 'input'):
+             m = make_transform(translate, rotate)
+             m = np.linalg.inv(m)
+             G.synthesis.input.transform.copy_(torch.from_numpy(m))
+
+         img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
+         img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
+         PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
+
+
+ #----------------------------------------------------------------------------
+
+ if __name__ == "__main__":
+     generate_images() # pylint: disable=no-value-for-parameter
+
+ #----------------------------------------------------------------------------
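The inverse-transform step in `generate_images` is easy to sanity-check: the generator receives the inverse of the user's rotation/translation matrix, so composing the two should give the identity. A sketch, assuming the repo root is on the import path and the dependencies above are installed:

```python
import numpy as np
from gen_images import make_transform, parse_range

m = make_transform(translate=(0.3, 1.0), angle=45.0)  # 3x3 homogeneous 2D transform
m_inv = np.linalg.inv(m)                              # what gets copied into G.synthesis.input.transform
assert np.allclose(m @ m_inv, np.eye(3), atol=1e-6)

# parse_range mirrors the --seeds syntax:
print(parse_range('1,2,5-10'))  # [1, 2, 5, 6, 7, 8, 9, 10]
```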
gradio_utils/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from .utils import (ImageMask, draw_mask_on_image, draw_points_on_image,
+                     get_latest_points_pair, get_valid_mask,
+                     on_change_single_global_state)
+
+ __all__ = [
+     'draw_mask_on_image', 'draw_points_on_image',
+     'on_change_single_global_state', 'get_latest_points_pair',
+     'get_valid_mask', 'ImageMask'
+ ]
gradio_utils/utils.py ADDED
@@ -0,0 +1,154 @@
+ import gradio as gr
+ import numpy as np
+ from PIL import Image, ImageDraw
+
+
+ class ImageMask(gr.components.Image):
+     """
+     Sets: source="upload", tool="sketch"
+     """
+
+     is_template = True
+
+     def __init__(self, **kwargs):
+         super().__init__(source="upload",
+                          tool="sketch",
+                          interactive=False,
+                          **kwargs)
+
+     def preprocess(self, x):
+         if x is None:
+             return x
+         if self.tool == "sketch" and self.source in ["upload", "webcam"
+                                                      ] and type(x) != dict:
+             decode_image = gr.processing_utils.decode_base64_to_image(x)
+             width, height = decode_image.size
+             mask = np.ones((height, width, 4), dtype=np.uint8)
+             mask[..., -1] = 255
+             mask = self.postprocess(mask)
+             x = {'image': x, 'mask': mask}
+         return super().preprocess(x)
+
+
+ def get_valid_mask(mask: np.ndarray):
+     """Convert mask from gr.Image (0 to 255, RGBA) to binary mask.
+     """
+     if mask.ndim == 3:
+         mask_pil = Image.fromarray(mask).convert('L')
+         mask = np.array(mask_pil)
+     if mask.max() == 255:
+         mask = mask / 255
+     return mask
+
+
+ def draw_points_on_image(image,
+                          points,
+                          curr_point=None,
+                          highlight_all=True,
+                          radius_scale=0.01):
+     overlay_rgba = Image.new("RGBA", image.size, 0)
+     overlay_draw = ImageDraw.Draw(overlay_rgba)
+     for point_key, point in points.items():
+         if ((curr_point is not None and curr_point == point_key)
+                 or highlight_all):
+             p_color = (255, 0, 0)
+             t_color = (0, 0, 255)
+
+         else:
+             p_color = (255, 0, 0, 35)
+             t_color = (0, 0, 255, 35)
+
+         rad_draw = int(image.size[0] * radius_scale)
+
+         p_start = point.get("start_temp", point["start"])
+         p_target = point["target"]
+
+         if p_start is not None and p_target is not None:
+             p_draw = int(p_start[0]), int(p_start[1])
+             t_draw = int(p_target[0]), int(p_target[1])
+
+             overlay_draw.line(
+                 (p_draw[0], p_draw[1], t_draw[0], t_draw[1]),
+                 fill=(255, 255, 0),
+                 width=2,
+             )
+
+         if p_start is not None:
+             p_draw = int(p_start[0]), int(p_start[1])
+             overlay_draw.ellipse(
+                 (
+                     p_draw[0] - rad_draw,
+                     p_draw[1] - rad_draw,
+                     p_draw[0] + rad_draw,
+                     p_draw[1] + rad_draw,
+                 ),
+                 fill=p_color,
+             )
+
+             if curr_point is not None and curr_point == point_key:
+                 # overlay_draw.text(p_draw, "p", font=font, align="center", fill=(0, 0, 0))
+                 overlay_draw.text(p_draw, "p", align="center", fill=(0, 0, 0))
+
+         if p_target is not None:
+             t_draw = int(p_target[0]), int(p_target[1])
+             overlay_draw.ellipse(
+                 (
+                     t_draw[0] - rad_draw,
+                     t_draw[1] - rad_draw,
+                     t_draw[0] + rad_draw,
+                     t_draw[1] + rad_draw,
+                 ),
+                 fill=t_color,
+             )
+
+             if curr_point is not None and curr_point == point_key:
+                 # overlay_draw.text(t_draw, "t", font=font, align="center", fill=(0, 0, 0))
+                 overlay_draw.text(t_draw, "t", align="center", fill=(0, 0, 0))
+
+     return Image.alpha_composite(image.convert("RGBA"),
+                                  overlay_rgba).convert("RGB")
+
+
+ def draw_mask_on_image(image, mask):
+     im_mask = np.uint8(mask * 255)
+     im_mask_rgba = np.concatenate(
+         (
+             np.tile(im_mask[..., None], [1, 1, 3]),
+             45 * np.ones(
+                 (im_mask.shape[0], im_mask.shape[1], 1), dtype=np.uint8),
+         ),
+         axis=-1,
+     )
+     im_mask_rgba = Image.fromarray(im_mask_rgba).convert("RGBA")
+
+     return Image.alpha_composite(image.convert("RGBA"),
+                                  im_mask_rgba).convert("RGB")
+
+
+ def on_change_single_global_state(keys,
+                                   value,
+                                   global_state,
+                                   map_transform=None):
+     if map_transform is not None:
+         value = map_transform(value)
+
+     curr_state = global_state
+     if isinstance(keys, str):
+         last_key = keys
+
+     else:
+         for k in keys[:-1]:
+             curr_state = curr_state[k]
+
+         last_key = keys[-1]
+
+     curr_state[last_key] = value
+     return global_state
+
+
+ def get_latest_points_pair(points_dict):
+     if not points_dict:
+         return None
+     point_idx = list(points_dict.keys())
+     latest_point_idx = max(point_idx)
+     return latest_point_idx
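For reference, the points structure these helpers expect is a dict keyed by point index, where each entry carries a handle point (`start`, optionally overridden by `start_temp` mid-drag) and a `target` point. A usage sketch, assuming gradio and Pillow are installed:

```python
from PIL import Image
from gradio_utils import draw_points_on_image, get_latest_points_pair

points = {
    0: {'start': (64, 64), 'target': (96, 64)},
    1: {'start': (120, 40), 'target': None},  # target not picked yet
}
print(get_latest_points_pair(points))  # -> 1 (highest key)

canvas = Image.new('RGB', (256, 256), 'white')
preview = draw_points_on_image(canvas, points, curr_point=0)
preview.save('points_preview.png')
```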
gui_utils/__init__.py ADDED
@@ -0,0 +1,9 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ # empty
gui_utils/gl_utils.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ import math
10
+ import os
11
+ import functools
12
+ import contextlib
13
+ import numpy as np
14
+ import OpenGL.GL as gl
15
+ import OpenGL.GL.ARB.texture_float
16
+ import dnnlib
17
+
18
+ #----------------------------------------------------------------------------
19
+
20
+ def init_egl():
21
+ assert os.environ['PYOPENGL_PLATFORM'] == 'egl' # Must be set before importing OpenGL.
22
+ import OpenGL.EGL as egl
23
+ import ctypes
24
+
25
+ # Initialize EGL.
26
+ display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
27
+ assert display != egl.EGL_NO_DISPLAY
28
+ major = ctypes.c_int32()
29
+ minor = ctypes.c_int32()
30
+ ok = egl.eglInitialize(display, major, minor)
31
+ assert ok
32
+ assert major.value * 10 + minor.value >= 14
33
+
34
+ # Choose config.
35
+ config_attribs = [
36
+ egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT,
37
+ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT,
38
+ egl.EGL_NONE
39
+ ]
40
+ configs = (ctypes.c_int32 * 1)()
41
+ num_configs = ctypes.c_int32()
42
+ ok = egl.eglChooseConfig(display, config_attribs, configs, 1, num_configs)
43
+ assert ok
44
+ assert num_configs.value == 1
45
+ config = configs[0]
46
+
47
+ # Create dummy pbuffer surface.
48
+ surface_attribs = [
49
+ egl.EGL_WIDTH, 1,
50
+ egl.EGL_HEIGHT, 1,
51
+ egl.EGL_NONE
52
+ ]
53
+ surface = egl.eglCreatePbufferSurface(display, config, surface_attribs)
54
+ assert surface != egl.EGL_NO_SURFACE
55
+
56
+ # Setup GL context.
57
+ ok = egl.eglBindAPI(egl.EGL_OPENGL_API)
58
+ assert ok
59
+ context = egl.eglCreateContext(display, config, egl.EGL_NO_CONTEXT, None)
60
+ assert context != egl.EGL_NO_CONTEXT
61
+ ok = egl.eglMakeCurrent(display, surface, surface, context)
62
+ assert ok
63
+
64
+ #----------------------------------------------------------------------------
65
+
66
+ _texture_formats = {
67
+ ('uint8', 1): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_LUMINANCE, internalformat=gl.GL_LUMINANCE8),
68
+ ('uint8', 2): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_LUMINANCE_ALPHA, internalformat=gl.GL_LUMINANCE8_ALPHA8),
69
+ ('uint8', 3): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_RGB, internalformat=gl.GL_RGB8),
70
+ ('uint8', 4): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_RGBA, internalformat=gl.GL_RGBA8),
71
+ ('float32', 1): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_LUMINANCE, internalformat=OpenGL.GL.ARB.texture_float.GL_LUMINANCE32F_ARB),
72
+ ('float32', 2): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_LUMINANCE_ALPHA, internalformat=OpenGL.GL.ARB.texture_float.GL_LUMINANCE_ALPHA32F_ARB),
73
+ ('float32', 3): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_RGB, internalformat=gl.GL_RGB32F),
74
+ ('float32', 4): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_RGBA, internalformat=gl.GL_RGBA32F),
75
+ }
76
+
77
+ def get_texture_format(dtype, channels):
78
+ return _texture_formats[(np.dtype(dtype).name, int(channels))]
79
+
80
+ #----------------------------------------------------------------------------
81
+
82
+ def prepare_texture_data(image):
83
+ image = np.asarray(image)
84
+ if image.ndim == 2:
85
+ image = image[:, :, np.newaxis]
86
+ if image.dtype.name == 'float64':
87
+ image = image.astype('float32')
88
+ return image
89
+
90
+ #----------------------------------------------------------------------------
91
+
92
+ def draw_pixels(image, *, pos=0, zoom=1, align=0, rint=True):
93
+ pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
94
+ zoom = np.broadcast_to(np.asarray(zoom, dtype='float32'), [2])
95
+ align = np.broadcast_to(np.asarray(align, dtype='float32'), [2])
96
+ image = prepare_texture_data(image)
97
+ height, width, channels = image.shape
98
+ size = zoom * [width, height]
99
+ pos = pos - size * align
100
+ if rint:
101
+ pos = np.rint(pos)
102
+ fmt = get_texture_format(image.dtype, channels)
103
+
104
+ gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_PIXEL_MODE_BIT)
105
+ gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
106
+ gl.glRasterPos2f(pos[0], pos[1])
107
+ gl.glPixelZoom(zoom[0], -zoom[1])
108
+ gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
109
+ gl.glDrawPixels(width, height, fmt.format, fmt.type, image)
110
+ gl.glPopClientAttrib()
111
+ gl.glPopAttrib()
112
+
113
+ #----------------------------------------------------------------------------
114
+
115
+ def read_pixels(width, height, *, pos=0, dtype='uint8', channels=3):
116
+ pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
117
+ dtype = np.dtype(dtype)
118
+ fmt = get_texture_format(dtype, channels)
119
+ image = np.empty([height, width, channels], dtype=dtype)
120
+
121
+ gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
122
+ gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
123
+ gl.glReadPixels(int(np.round(pos[0])), int(np.round(pos[1])), width, height, fmt.format, fmt.type, image)
124
+ gl.glPopClientAttrib()
125
+ return np.flipud(image)
126
+
127
+ #----------------------------------------------------------------------------
128
+
129
+ class Texture:
130
+ def __init__(self, *, image=None, width=None, height=None, channels=None, dtype=None, bilinear=True, mipmap=True):
131
+ self.gl_id = None
132
+ self.bilinear = bilinear
133
+ self.mipmap = mipmap
134
+
135
+ # Determine size and dtype.
136
+ if image is not None:
137
+ image = prepare_texture_data(image)
138
+ self.height, self.width, self.channels = image.shape
139
+ self.dtype = image.dtype
140
+ else:
141
+ assert width is not None and height is not None
142
+ self.width = width
143
+ self.height = height
144
+ self.channels = channels if channels is not None else 3
145
+ self.dtype = np.dtype(dtype) if dtype is not None else np.uint8
146
+
147
+ # Validate size and dtype.
148
+ assert isinstance(self.width, int) and self.width >= 0
149
+ assert isinstance(self.height, int) and self.height >= 0
150
+ assert isinstance(self.channels, int) and self.channels >= 1
151
+ assert self.is_compatible(width=width, height=height, channels=channels, dtype=dtype)
152
+
153
+ # Create texture object.
154
+ self.gl_id = gl.glGenTextures(1)
155
+ with self.bind():
156
+ gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
157
+             gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)
+             gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR if self.bilinear else gl.GL_NEAREST)
+             gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR_MIPMAP_LINEAR if self.mipmap else gl.GL_NEAREST)
+         self.update(image)
+
+     def delete(self):
+         if self.gl_id is not None:
+             gl.glDeleteTextures([self.gl_id])
+             self.gl_id = None
+
+     def __del__(self):
+         try:
+             self.delete()
+         except:
+             pass
+
+     @contextlib.contextmanager
+     def bind(self):
+         prev_id = gl.glGetInteger(gl.GL_TEXTURE_BINDING_2D)
+         gl.glBindTexture(gl.GL_TEXTURE_2D, self.gl_id)
+         yield
+         gl.glBindTexture(gl.GL_TEXTURE_2D, prev_id)
+
+     def update(self, image):
+         if image is not None:
+             image = prepare_texture_data(image)
+             assert self.is_compatible(image=image)
+         with self.bind():
+             fmt = get_texture_format(self.dtype, self.channels)
+             gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
+             gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
+             gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, fmt.internalformat, self.width, self.height, 0, fmt.format, fmt.type, image)
+             if self.mipmap:
+                 gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
+             gl.glPopClientAttrib()
+
+     def draw(self, *, pos=0, zoom=1, align=0, rint=False, color=1, alpha=1, rounding=0):
+         zoom = np.broadcast_to(np.asarray(zoom, dtype='float32'), [2])
+         size = zoom * [self.width, self.height]
+         with self.bind():
+             gl.glPushAttrib(gl.GL_ENABLE_BIT)
+             gl.glEnable(gl.GL_TEXTURE_2D)
+             draw_rect(pos=pos, size=size, align=align, rint=rint, color=color, alpha=alpha, rounding=rounding)
+             gl.glPopAttrib()
+
+     def is_compatible(self, *, image=None, width=None, height=None, channels=None, dtype=None): # pylint: disable=too-many-return-statements
+         if image is not None:
+             if image.ndim != 3:
+                 return False
+             ih, iw, ic = image.shape
+             if not self.is_compatible(width=iw, height=ih, channels=ic, dtype=image.dtype):
+                 return False
+         if width is not None and self.width != width:
+             return False
+         if height is not None and self.height != height:
+             return False
+         if channels is not None and self.channels != channels:
+             return False
+         if dtype is not None and self.dtype != dtype:
+             return False
+         return True
+
+ #----------------------------------------------------------------------------
+
+ class Framebuffer:
+     def __init__(self, *, texture=None, width=None, height=None, channels=None, dtype=None, msaa=0):
+         self.texture = texture
+         self.gl_id = None
+         self.gl_color = None
+         self.gl_depth_stencil = None
+         self.msaa = msaa
+
+         # Determine size and dtype.
+         if texture is not None:
+             assert isinstance(self.texture, Texture)
+             self.width = texture.width
+             self.height = texture.height
+             self.channels = texture.channels
+             self.dtype = texture.dtype
+         else:
+             assert width is not None and height is not None
+             self.width = width
+             self.height = height
+             self.channels = channels if channels is not None else 4
+             self.dtype = np.dtype(dtype) if dtype is not None else np.float32
+
+         # Validate size and dtype.
+         assert isinstance(self.width, int) and self.width >= 0
+         assert isinstance(self.height, int) and self.height >= 0
+         assert isinstance(self.channels, int) and self.channels >= 1
+         assert width is None or width == self.width
+         assert height is None or height == self.height
+         assert channels is None or channels == self.channels
+         assert dtype is None or dtype == self.dtype
+
+         # Create framebuffer object.
+         self.gl_id = gl.glGenFramebuffers(1)
+         with self.bind():
+
+             # Setup color buffer.
+             if self.texture is not None:
+                 assert self.msaa == 0
+                 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.texture.gl_id, 0)
+             else:
+                 fmt = get_texture_format(self.dtype, self.channels)
+                 self.gl_color = gl.glGenRenderbuffers(1)
+                 gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.gl_color)
+                 gl.glRenderbufferStorageMultisample(gl.GL_RENDERBUFFER, self.msaa, fmt.internalformat, self.width, self.height)
+                 gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_RENDERBUFFER, self.gl_color)
+
+             # Setup depth/stencil buffer.
+             self.gl_depth_stencil = gl.glGenRenderbuffers(1)
+             gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.gl_depth_stencil)
+             gl.glRenderbufferStorageMultisample(gl.GL_RENDERBUFFER, self.msaa, gl.GL_DEPTH24_STENCIL8, self.width, self.height)
+             gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_STENCIL_ATTACHMENT, gl.GL_RENDERBUFFER, self.gl_depth_stencil)
+
+     def delete(self):
+         if self.gl_id is not None:
+             gl.glDeleteFramebuffers([self.gl_id])
+             self.gl_id = None
+         if self.gl_color is not None:
+             gl.glDeleteRenderbuffers(1, [self.gl_color])
+             self.gl_color = None
+         if self.gl_depth_stencil is not None:
+             gl.glDeleteRenderbuffers(1, [self.gl_depth_stencil])
+             self.gl_depth_stencil = None
+
+     def __del__(self):
+         try:
+             self.delete()
+         except:
+             pass
+
+     @contextlib.contextmanager
+     def bind(self):
+         prev_fbo = gl.glGetInteger(gl.GL_FRAMEBUFFER_BINDING)
+         prev_rbo = gl.glGetInteger(gl.GL_RENDERBUFFER_BINDING)
+         gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.gl_id)
+         if self.width is not None and self.height is not None:
+             gl.glViewport(0, 0, self.width, self.height)
+         yield
+         gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, prev_fbo)
+         gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, prev_rbo)
+
+     def blit(self, dst=None):
+         assert dst is None or isinstance(dst, Framebuffer)
+         with self.bind():
+             gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0 if dst is None else dst.gl_id) # Framebuffer stores its GL handle in gl_id; it has no fbo attribute.
+             gl.glBlitFramebuffer(0, 0, self.width, self.height, 0, 0, self.width, self.height, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
+
+ #----------------------------------------------------------------------------
+
+ def draw_shape(vertices, *, mode=gl.GL_TRIANGLE_FAN, pos=0, size=1, color=1, alpha=1):
+     assert vertices.ndim == 2 and vertices.shape[1] == 2
+     pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
+     size = np.broadcast_to(np.asarray(size, dtype='float32'), [2])
+     color = np.broadcast_to(np.asarray(color, dtype='float32'), [3])
+     alpha = np.clip(np.broadcast_to(np.asarray(alpha, dtype='float32'), []), 0, 1)
+
+     gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)
+     gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_TRANSFORM_BIT)
+     gl.glMatrixMode(gl.GL_MODELVIEW)
+     gl.glPushMatrix()
+
+     gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
+     gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
+     gl.glVertexPointer(2, gl.GL_FLOAT, 0, vertices)
+     gl.glTexCoordPointer(2, gl.GL_FLOAT, 0, vertices)
+     gl.glTranslate(pos[0], pos[1], 0)
+     gl.glScale(size[0], size[1], 1)
+     gl.glColor4f(color[0] * alpha, color[1] * alpha, color[2] * alpha, alpha)
+     gl.glDrawArrays(mode, 0, vertices.shape[0])
+
+     gl.glPopMatrix()
+     gl.glPopAttrib()
+     gl.glPopClientAttrib()
+
+ #----------------------------------------------------------------------------
+
+ def draw_arrow(x1, y1, x2, y2, l=10, width=1.0):
+     # Compute the length and angle of the arrow
+     dx = x2 - x1
+     dy = y2 - y1
+     length = math.sqrt(dx**2 + dy**2)
+     if length < l:
+         return
+     angle = math.atan2(dy, dx)
+
+     # Save the current modelview matrix
+     gl.glPushMatrix()
+
+     # Translate and rotate the coordinate system
+     gl.glTranslatef(x1, y1, 0.0)
+     gl.glRotatef(angle * 180.0 / math.pi, 0.0, 0.0, 1.0)
+
+     # Set the line width
+     gl.glLineWidth(width)
+     # gl.glColor3f(0.75, 0.75, 0.75)
+
+     # Begin drawing lines
+     gl.glBegin(gl.GL_LINES)
+
+     # Draw the shaft of the arrow
+     gl.glVertex2f(0.0, 0.0)
+     gl.glVertex2f(length, 0.0)
+
+     # Draw the head of the arrow
+     gl.glVertex2f(length, 0.0)
+     gl.glVertex2f(length - 2 * l, l)
+     gl.glVertex2f(length, 0.0)
+     gl.glVertex2f(length - 2 * l, -l)
+
+     # End drawing lines
+     gl.glEnd()
+
+     # Restore the modelview matrix
+     gl.glPopMatrix()
+
+ #----------------------------------------------------------------------------
+
+ def draw_rect(*, pos=0, pos2=None, size=None, align=0, rint=False, color=1, alpha=1, rounding=0):
+     assert pos2 is None or size is None
+     pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
+     pos2 = np.broadcast_to(np.asarray(pos2, dtype='float32'), [2]) if pos2 is not None else None
+     size = np.broadcast_to(np.asarray(size, dtype='float32'), [2]) if size is not None else None
+     size = size if size is not None else pos2 - pos if pos2 is not None else np.array([1, 1], dtype='float32')
+     pos = pos - size * align
+     if rint:
+         pos = np.rint(pos)
+     rounding = np.broadcast_to(np.asarray(rounding, dtype='float32'), [2])
+     rounding = np.minimum(np.abs(rounding) / np.maximum(np.abs(size), 1e-8), 0.5)
+     if np.min(rounding) == 0:
+         rounding *= 0
+     vertices = _setup_rect(float(rounding[0]), float(rounding[1]))
+     draw_shape(vertices, mode=gl.GL_TRIANGLE_FAN, pos=pos, size=size, color=color, alpha=alpha)
+
+ @functools.lru_cache(maxsize=10000)
+ def _setup_rect(rx, ry):
+     t = np.linspace(0, np.pi / 2, 1 if max(rx, ry) == 0 else 64)
+     s = 1 - np.sin(t); c = 1 - np.cos(t)
+     x = [c * rx, 1 - s * rx, 1 - c * rx, s * rx]
+     y = [s * ry, c * ry, 1 - s * ry, 1 - c * ry]
+     v = np.stack([x, y], axis=-1).reshape(-1, 2)
+     return v.astype('float32')
+
+ #----------------------------------------------------------------------------
+
+ def draw_circle(*, center=0, radius=100, hole=0, color=1, alpha=1):
+     hole = np.broadcast_to(np.asarray(hole, dtype='float32'), [])
+     vertices = _setup_circle(float(hole))
+     draw_shape(vertices, mode=gl.GL_TRIANGLE_STRIP, pos=center, size=radius, color=color, alpha=alpha)
+
+ @functools.lru_cache(maxsize=10000)
+ def _setup_circle(hole):
+     t = np.linspace(0, np.pi * 2, 128)
+     s = np.sin(t); c = np.cos(t)
+     v = np.stack([c, s, c * hole, s * hole], axis=-1).reshape(-1, 2)
+     return v.astype('float32')
+
+ #----------------------------------------------------------------------------
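Usage sketch (illustration only, not part of the commit): how the helpers above compose. It assumes an OpenGL context is already current — for example one created by the GlfwWindow class in the next file — and that the repo root is on PYTHONPATH.

    import numpy as np
    from gui_utils import gl_utils

    # Upload an 8-bit RGB image and draw it, assuming a current GL context.
    image = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # H x W x C
    tex = gl_utils.Texture(image=image, bilinear=True, mipmap=True)
    tex.draw(pos=[16, 16], zoom=2, alpha=0.9)                     # blit at 2x zoom
    gl_utils.draw_circle(center=[272, 272], radius=32, color=[1, 0, 0], alpha=0.5)
    tex.delete()                                                  # free the GL object explicitly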
gui_utils/glfw_window.py ADDED
@@ -0,0 +1,229 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ import time
+ import glfw
+ import OpenGL.GL as gl
+ from . import gl_utils
+
+ #----------------------------------------------------------------------------
+
+ class GlfwWindow: # pylint: disable=too-many-public-methods
+     def __init__(self, *, title='GlfwWindow', window_width=1920, window_height=1080, deferred_show=True, close_on_esc=True):
+         self._glfw_window = None
+         self._drawing_frame = False
+         self._frame_start_time = None
+         self._frame_delta = 0
+         self._fps_limit = None
+         self._vsync = None
+         self._skip_frames = 0
+         self._deferred_show = deferred_show
+         self._close_on_esc = close_on_esc
+         self._esc_pressed = False
+         self._drag_and_drop_paths = None
+         self._capture_next_frame = False
+         self._captured_frame = None
+
+         # Create window.
+         glfw.init()
+         glfw.window_hint(glfw.VISIBLE, False)
+         self._glfw_window = glfw.create_window(width=window_width, height=window_height, title=title, monitor=None, share=None)
+         self._attach_glfw_callbacks()
+         self.make_context_current()
+
+         # Adjust window.
+         self.set_vsync(False)
+         self.set_window_size(window_width, window_height)
+         if not self._deferred_show:
+             glfw.show_window(self._glfw_window)
+
+     def close(self):
+         if self._drawing_frame:
+             self.end_frame()
+         if self._glfw_window is not None:
+             glfw.destroy_window(self._glfw_window)
+             self._glfw_window = None
+         #glfw.terminate() # Commented out to play it nice with other glfw clients.
+
+     def __del__(self):
+         try:
+             self.close()
+         except:
+             pass
+
+     @property
+     def window_width(self):
+         return self.content_width
+
+     @property
+     def window_height(self):
+         return self.content_height + self.title_bar_height
+
+     @property
+     def content_width(self):
+         width, _height = glfw.get_window_size(self._glfw_window)
+         return width
+
+     @property
+     def content_height(self):
+         _width, height = glfw.get_window_size(self._glfw_window)
+         return height
+
+     @property
+     def title_bar_height(self):
+         _left, top, _right, _bottom = glfw.get_window_frame_size(self._glfw_window)
+         return top
+
+     @property
+     def monitor_width(self):
+         _, _, width, _height = glfw.get_monitor_workarea(glfw.get_primary_monitor())
+         return width
+
+     @property
+     def monitor_height(self):
+         _, _, _width, height = glfw.get_monitor_workarea(glfw.get_primary_monitor())
+         return height
+
+     @property
+     def frame_delta(self):
+         return self._frame_delta
+
+     def set_title(self, title):
+         glfw.set_window_title(self._glfw_window, title)
+
+     def set_window_size(self, width, height):
+         width = min(width, self.monitor_width)
+         height = min(height, self.monitor_height)
+         glfw.set_window_size(self._glfw_window, width, max(height - self.title_bar_height, 0))
+         if width == self.monitor_width and height == self.monitor_height:
+             self.maximize()
+
+     def set_content_size(self, width, height):
+         self.set_window_size(width, height + self.title_bar_height)
+
+     def maximize(self):
+         glfw.maximize_window(self._glfw_window)
+
+     def set_position(self, x, y):
+         glfw.set_window_pos(self._glfw_window, x, y + self.title_bar_height)
+
+     def center(self):
+         self.set_position((self.monitor_width - self.window_width) // 2, (self.monitor_height - self.window_height) // 2)
+
+     def set_vsync(self, vsync):
+         vsync = bool(vsync)
+         if vsync != self._vsync:
+             glfw.swap_interval(1 if vsync else 0)
+             self._vsync = vsync
+
+     def set_fps_limit(self, fps_limit):
+         self._fps_limit = int(fps_limit)
+
+     def should_close(self):
+         return glfw.window_should_close(self._glfw_window) or (self._close_on_esc and self._esc_pressed)
+
+     def skip_frame(self):
+         self.skip_frames(1)
+
+     def skip_frames(self, num): # Do not update window for the next N frames.
+         self._skip_frames = max(self._skip_frames, int(num))
+
+     def is_skipping_frames(self):
+         return self._skip_frames > 0
+
+     def capture_next_frame(self):
+         self._capture_next_frame = True
+
+     def pop_captured_frame(self):
+         frame = self._captured_frame
+         self._captured_frame = None
+         return frame
+
+     def pop_drag_and_drop_paths(self):
+         paths = self._drag_and_drop_paths
+         self._drag_and_drop_paths = None
+         return paths
+
+     def draw_frame(self): # To be overridden by subclass.
+         self.begin_frame()
+         # Rendering code goes here.
+         self.end_frame()
+
+     def make_context_current(self):
+         if self._glfw_window is not None:
+             glfw.make_context_current(self._glfw_window)
+
+     def begin_frame(self):
+         # End previous frame.
+         if self._drawing_frame:
+             self.end_frame()
+
+         # Apply FPS limit.
+         if self._frame_start_time is not None and self._fps_limit is not None:
+             delay = self._frame_start_time - time.perf_counter() + 1 / self._fps_limit
+             if delay > 0:
+                 time.sleep(delay)
+         cur_time = time.perf_counter()
+         if self._frame_start_time is not None:
+             self._frame_delta = cur_time - self._frame_start_time
+         self._frame_start_time = cur_time
+
+         # Process events.
+         glfw.poll_events()
+
+         # Begin frame.
+         self._drawing_frame = True
+         self.make_context_current()
+
+         # Initialize GL state.
+         gl.glViewport(0, 0, self.content_width, self.content_height)
+         gl.glMatrixMode(gl.GL_PROJECTION)
+         gl.glLoadIdentity()
+         gl.glTranslate(-1, 1, 0)
+         gl.glScale(2 / max(self.content_width, 1), -2 / max(self.content_height, 1), 1)
+         gl.glMatrixMode(gl.GL_MODELVIEW)
+         gl.glLoadIdentity()
+         gl.glEnable(gl.GL_BLEND)
+         gl.glBlendFunc(gl.GL_ONE, gl.GL_ONE_MINUS_SRC_ALPHA) # Pre-multiplied alpha.
+
+         # Clear.
+         gl.glClearColor(0, 0, 0, 1)
+         gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
+
+     def end_frame(self):
+         assert self._drawing_frame
+         self._drawing_frame = False
+
+         # Skip frames if requested.
+         if self._skip_frames > 0:
+             self._skip_frames -= 1
+             return
+
+         # Capture frame if requested.
+         if self._capture_next_frame:
+             self._captured_frame = gl_utils.read_pixels(self.content_width, self.content_height)
+             self._capture_next_frame = False
+
+         # Update window.
+         if self._deferred_show:
+             glfw.show_window(self._glfw_window)
+             self._deferred_show = False
+         glfw.swap_buffers(self._glfw_window)
+
+     def _attach_glfw_callbacks(self):
+         glfw.set_key_callback(self._glfw_window, self._glfw_key_callback)
+         glfw.set_drop_callback(self._glfw_window, self._glfw_drop_callback)
+
+     def _glfw_key_callback(self, _window, key, _scancode, action, _mods):
+         if action == glfw.PRESS and key == glfw.KEY_ESCAPE:
+             self._esc_pressed = True
+
+     def _glfw_drop_callback(self, _window, paths):
+         self._drag_and_drop_paths = paths
+
+ #----------------------------------------------------------------------------
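Usage sketch (illustration only, not part of the commit): a minimal event loop built on GlfwWindow, assuming a display is available.

    from gui_utils.glfw_window import GlfwWindow

    class DemoWindow(GlfwWindow):
        def draw_frame(self):
            self.begin_frame()    # polls events, sets up the GL projection, clears
            # ... gl_utils draw calls go here ...
            self.end_frame()      # swaps buffers, handles deferred show / capture

    win = DemoWindow(title='Demo', window_width=800, window_height=600, deferred_show=False)
    win.set_fps_limit(60)
    while not win.should_close():  # window close button or Esc
        win.draw_frame()
    win.close()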
gui_utils/imgui_utils.py ADDED
@@ -0,0 +1,191 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ import contextlib
+ import imgui
+
+ #----------------------------------------------------------------------------
+
+ def set_default_style(color_scheme='dark', spacing=9, indent=23, scrollbar=27):
+     s = imgui.get_style()
+     s.window_padding = [spacing, spacing]
+     s.item_spacing = [spacing, spacing]
+     s.item_inner_spacing = [spacing, spacing]
+     s.columns_min_spacing = spacing
+     s.indent_spacing = indent
+     s.scrollbar_size = scrollbar
+     s.frame_padding = [4, 3]
+     s.window_border_size = 1
+     s.child_border_size = 1
+     s.popup_border_size = 1
+     s.frame_border_size = 1
+     s.window_rounding = 0
+     s.child_rounding = 0
+     s.popup_rounding = 3
+     s.frame_rounding = 3
+     s.scrollbar_rounding = 3
+     s.grab_rounding = 3
+
+     getattr(imgui, f'style_colors_{color_scheme}')(s)
+     c0 = s.colors[imgui.COLOR_MENUBAR_BACKGROUND]
+     c1 = s.colors[imgui.COLOR_FRAME_BACKGROUND]
+     s.colors[imgui.COLOR_POPUP_BACKGROUND] = [x * 0.7 + y * 0.3 for x, y in zip(c0, c1)][:3] + [1]
+
+ #----------------------------------------------------------------------------
+
+ @contextlib.contextmanager
+ def grayed_out(cond=True):
+     if cond:
+         s = imgui.get_style()
+         text = s.colors[imgui.COLOR_TEXT_DISABLED]
+         grab = s.colors[imgui.COLOR_SCROLLBAR_GRAB]
+         back = s.colors[imgui.COLOR_MENUBAR_BACKGROUND]
+         imgui.push_style_color(imgui.COLOR_TEXT, *text)
+         imgui.push_style_color(imgui.COLOR_CHECK_MARK, *grab)
+         imgui.push_style_color(imgui.COLOR_SLIDER_GRAB, *grab)
+         imgui.push_style_color(imgui.COLOR_SLIDER_GRAB_ACTIVE, *grab)
+         imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND, *back)
+         imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_HOVERED, *back)
+         imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_ACTIVE, *back)
+         imgui.push_style_color(imgui.COLOR_BUTTON, *back)
+         imgui.push_style_color(imgui.COLOR_BUTTON_HOVERED, *back)
+         imgui.push_style_color(imgui.COLOR_BUTTON_ACTIVE, *back)
+         imgui.push_style_color(imgui.COLOR_HEADER, *back)
+         imgui.push_style_color(imgui.COLOR_HEADER_HOVERED, *back)
+         imgui.push_style_color(imgui.COLOR_HEADER_ACTIVE, *back)
+         imgui.push_style_color(imgui.COLOR_POPUP_BACKGROUND, *back)
+         yield
+         imgui.pop_style_color(14)
+     else:
+         yield
+
+ #----------------------------------------------------------------------------
+
+ @contextlib.contextmanager
+ def item_width(width=None):
+     if width is not None:
+         imgui.push_item_width(width)
+         yield
+         imgui.pop_item_width()
+     else:
+         yield
+
+ #----------------------------------------------------------------------------
+
+ def scoped_by_object_id(method):
+     def decorator(self, *args, **kwargs):
+         imgui.push_id(str(id(self)))
+         res = method(self, *args, **kwargs)
+         imgui.pop_id()
+         return res
+     return decorator
+
+ #----------------------------------------------------------------------------
+
+ def button(label, width=0, enabled=True):
+     with grayed_out(not enabled):
+         clicked = imgui.button(label, width=width)
+     clicked = clicked and enabled
+     return clicked
+
+ #----------------------------------------------------------------------------
+
+ def collapsing_header(text, visible=None, flags=0, default=False, enabled=True, show=True):
+     expanded = False
+     if show:
+         if default:
+             flags |= imgui.TREE_NODE_DEFAULT_OPEN
+         if not enabled:
+             flags |= imgui.TREE_NODE_LEAF
+         with grayed_out(not enabled):
+             expanded, visible = imgui.collapsing_header(text, visible=visible, flags=flags)
+         expanded = expanded and enabled
+     return expanded, visible
+
+ #----------------------------------------------------------------------------
+
+ def popup_button(label, width=0, enabled=True):
+     if button(label, width, enabled):
+         imgui.open_popup(label)
+     opened = imgui.begin_popup(label)
+     return opened
+
+ #----------------------------------------------------------------------------
+
+ def input_text(label, value, buffer_length, flags, width=None, help_text=''):
+     old_value = value
+     color = list(imgui.get_style().colors[imgui.COLOR_TEXT])
+     if value == '':
+         color[-1] *= 0.5
+     with item_width(width):
+         imgui.push_style_color(imgui.COLOR_TEXT, *color)
+         value = value if value != '' else help_text
+         changed, value = imgui.input_text(label, value, buffer_length, flags)
+         value = value if value != help_text else ''
+         imgui.pop_style_color(1)
+     if not flags & imgui.INPUT_TEXT_ENTER_RETURNS_TRUE:
+         changed = (value != old_value)
+     return changed, value
+
+ #----------------------------------------------------------------------------
+
+ def drag_previous_control(enabled=True):
+     dragging = False
+     dx = 0
+     dy = 0
+     if imgui.begin_drag_drop_source(imgui.DRAG_DROP_SOURCE_NO_PREVIEW_TOOLTIP):
+         if enabled:
+             dragging = True
+             dx, dy = imgui.get_mouse_drag_delta()
+             imgui.reset_mouse_drag_delta()
+         imgui.end_drag_drop_source()
+     return dragging, dx, dy
+
+ #----------------------------------------------------------------------------
+
+ def drag_button(label, width=0, enabled=True):
+     clicked = button(label, width=width, enabled=enabled)
+     dragging, dx, dy = drag_previous_control(enabled=enabled)
+     return clicked, dragging, dx, dy
+
+ #----------------------------------------------------------------------------
+
+ def drag_hidden_window(label, x, y, width, height, enabled=True):
+     imgui.push_style_color(imgui.COLOR_WINDOW_BACKGROUND, 0, 0, 0, 0)
+     imgui.push_style_color(imgui.COLOR_BORDER, 0, 0, 0, 0)
+     imgui.set_next_window_position(x, y)
+     imgui.set_next_window_size(width, height)
+     imgui.begin(label, closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE))
+     dragging, dx, dy = drag_previous_control(enabled=enabled)
+     imgui.end()
+     imgui.pop_style_color(2)
+     return dragging, dx, dy
+
+ #----------------------------------------------------------------------------
+
+ def click_hidden_window(label, x, y, width, height, img_w, img_h, enabled=True):
+     imgui.push_style_color(imgui.COLOR_WINDOW_BACKGROUND, 0, 0, 0, 0)
+     imgui.push_style_color(imgui.COLOR_BORDER, 0, 0, 0, 0)
+     imgui.set_next_window_position(x, y)
+     imgui.set_next_window_size(width, height)
+     imgui.begin(label, closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE))
+     clicked, down = False, False
+     img_x, img_y = 0, 0
+     if imgui.is_mouse_down():
+         posx, posy = imgui.get_mouse_pos()
+         if posx >= x and posx < x + width and posy >= y and posy < y + height:
+             if imgui.is_mouse_clicked():
+                 clicked = True
+             down = True
+             img_x = round((posx - x) / (width - 1) * (img_w - 1))
+             img_y = round((posy - y) / (height - 1) * (img_h - 1))
+     imgui.end()
+     imgui.pop_style_color(2)
+     return clicked, down, img_x, img_y
+
+ #----------------------------------------------------------------------------
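Usage sketch (illustration only, not part of the commit): how these wrappers are typically combined inside an active imgui frame — e.g. between ImguiWindow.begin_frame() and end_frame() in the next file.

    import imgui
    from gui_utils import imgui_utils

    imgui.begin('Controls')
    with imgui_utils.item_width(200):
        changed, text = imgui_utils.input_text('##seed', '', 256, flags=0, help_text='random seed')
    if imgui_utils.button('Generate', width=100, enabled=(text != '')):
        print('generate with seed', text)
    imgui.end()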
gui_utils/imgui_window.py ADDED
@@ -0,0 +1,103 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ import os
+ import imgui
+ import imgui.integrations.glfw
+
+ from . import glfw_window
+ from . import imgui_utils
+ from . import text_utils
+
+ #----------------------------------------------------------------------------
+
+ class ImguiWindow(glfw_window.GlfwWindow):
+     def __init__(self, *, title='ImguiWindow', font=None, font_sizes=range(14,24), **glfw_kwargs):
+         if font is None:
+             font = text_utils.get_default_font()
+         font_sizes = {int(size) for size in font_sizes}
+         super().__init__(title=title, **glfw_kwargs)
+
+         # Init fields.
+         self._imgui_context = None
+         self._imgui_renderer = None
+         self._imgui_fonts = None
+         self._cur_font_size = max(font_sizes)
+
+         # Delete leftover imgui.ini to avoid unexpected behavior.
+         if os.path.isfile('imgui.ini'):
+             os.remove('imgui.ini')
+
+         # Init ImGui.
+         self._imgui_context = imgui.create_context()
+         self._imgui_renderer = _GlfwRenderer(self._glfw_window)
+         self._attach_glfw_callbacks()
+         imgui.get_io().ini_saving_rate = 0 # Disable creating imgui.ini at runtime.
+         imgui.get_io().mouse_drag_threshold = 0 # Improve behavior with imgui_utils.drag_custom().
+         self._imgui_fonts = {size: imgui.get_io().fonts.add_font_from_file_ttf(font, size) for size in font_sizes}
+         self._imgui_renderer.refresh_font_texture()
+
+     def close(self):
+         self.make_context_current()
+         self._imgui_fonts = None
+         if self._imgui_renderer is not None:
+             self._imgui_renderer.shutdown()
+             self._imgui_renderer = None
+         if self._imgui_context is not None:
+             #imgui.destroy_context(self._imgui_context) # Commented out to avoid creating imgui.ini at the end.
+             self._imgui_context = None
+         super().close()
+
+     def _glfw_key_callback(self, *args):
+         super()._glfw_key_callback(*args)
+         self._imgui_renderer.keyboard_callback(*args)
+
+     @property
+     def font_size(self):
+         return self._cur_font_size
+
+     @property
+     def spacing(self):
+         return round(self._cur_font_size * 0.4)
+
+     def set_font_size(self, target): # Applied on next frame.
+         self._cur_font_size = min((abs(key - target), key) for key in self._imgui_fonts.keys())[1]
+
+     def begin_frame(self):
+         # Begin glfw frame.
+         super().begin_frame()
+
+         # Process imgui events.
+         self._imgui_renderer.mouse_wheel_multiplier = self._cur_font_size / 10
+         if self.content_width > 0 and self.content_height > 0:
+             self._imgui_renderer.process_inputs()
+
+         # Begin imgui frame.
+         imgui.new_frame()
+         imgui.push_font(self._imgui_fonts[self._cur_font_size])
+         imgui_utils.set_default_style(spacing=self.spacing, indent=self.font_size, scrollbar=self.font_size+4)
+
+     def end_frame(self):
+         imgui.pop_font()
+         imgui.render()
+         imgui.end_frame()
+         self._imgui_renderer.render(imgui.get_draw_data())
+         super().end_frame()
+
+ #----------------------------------------------------------------------------
+ # Wrapper class for GlfwRenderer to fix a mouse wheel bug on Linux.
+
+ class _GlfwRenderer(imgui.integrations.glfw.GlfwRenderer):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.mouse_wheel_multiplier = 1
+
+     def scroll_callback(self, window, x_offset, y_offset):
+         self.io.mouse_wheel += y_offset * self.mouse_wheel_multiplier
+
+ #----------------------------------------------------------------------------
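Usage sketch (illustration only, not part of the commit): the intended run loop for ImguiWindow, which layers the imgui frame on top of the glfw one.

    import imgui
    from gui_utils.imgui_window import ImguiWindow

    class App(ImguiWindow):
        def draw_frame(self):
            self.begin_frame()                    # glfw frame + imgui.new_frame()
            imgui.begin('Stats')
            imgui.text(f'{self.frame_delta * 1000:.1f} ms/frame')
            imgui.end()
            self.end_frame()                      # renders imgui, swaps buffers

    app = App(title='Demo', window_width=800, window_height=600)
    while not app.should_close():
        app.draw_frame()
    app.close()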
gui_utils/text_utils.py ADDED
@@ -0,0 +1,123 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ import functools
+ from typing import Optional
+
+ import dnnlib
+ import numpy as np
+ import PIL.Image
+ import PIL.ImageFont
+ import scipy.ndimage
+
+ from . import gl_utils
+
+ #----------------------------------------------------------------------------
+
+ def get_default_font():
+     url = 'http://fonts.gstatic.com/s/opensans/v17/mem8YaGs126MiZpBA-U1UpcaXcl0Aw.ttf' # Open Sans regular
+     return dnnlib.util.open_url(url, return_filename=True)
+
+ #----------------------------------------------------------------------------
+
+ @functools.lru_cache(maxsize=None)
+ def get_pil_font(font=None, size=32):
+     if font is None:
+         font = get_default_font()
+     return PIL.ImageFont.truetype(font=font, size=size)
+
+ #----------------------------------------------------------------------------
+
+ def get_array(string, *, dropshadow_radius: int=None, **kwargs):
+     if dropshadow_radius is not None:
+         offset_x = int(np.ceil(dropshadow_radius*2/3))
+         offset_y = int(np.ceil(dropshadow_radius*2/3))
+         return _get_array_priv(string, dropshadow_radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs)
+     else:
+         return _get_array_priv(string, **kwargs)
+
+ @functools.lru_cache(maxsize=10000)
+ def _get_array_priv(
+     string: str, *,
+     size: int = 32,
+     max_width: Optional[int]=None,
+     max_height: Optional[int]=None,
+     min_size=10,
+     shrink_coef=0.8,
+     dropshadow_radius: int=None,
+     offset_x: int=None,
+     offset_y: int=None,
+     **kwargs
+ ):
+     cur_size = size
+     array = None
+     while True:
+         if dropshadow_radius is not None:
+             # separate implementation for dropshadow text rendering
+             array = _get_array_impl_dropshadow(string, size=cur_size, radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs)
+         else:
+             array = _get_array_impl(string, size=cur_size, **kwargs)
+         height, width, _ = array.shape
+         if (max_width is None or width <= max_width) and (max_height is None or height <= max_height) or (cur_size <= min_size):
+             break
+         cur_size = max(int(cur_size * shrink_coef), min_size)
+     return array
+
+ #----------------------------------------------------------------------------
+
+ @functools.lru_cache(maxsize=10000)
+ def _get_array_impl(string, *, font=None, size=32, outline=0, outline_pad=3, outline_coef=3, outline_exp=2, line_pad: int=None):
+     pil_font = get_pil_font(font=font, size=size)
+     lines = [pil_font.getmask(line, 'L') for line in string.split('\n')]
+     lines = [np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines]
+     width = max(line.shape[1] for line in lines)
+     lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines]
+     line_spacing = line_pad if line_pad is not None else size // 2
+     lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:]
+     mask = np.concatenate(lines, axis=0)
+     alpha = mask
+     if outline > 0:
+         mask = np.pad(mask, int(np.ceil(outline * outline_pad)), mode='constant', constant_values=0)
+         alpha = mask.astype(np.float32) / 255
+         alpha = scipy.ndimage.gaussian_filter(alpha, outline)
+         alpha = 1 - np.maximum(1 - alpha * outline_coef, 0) ** outline_exp
+         alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8)
+         alpha = np.maximum(alpha, mask)
+     return np.stack([mask, alpha], axis=-1)
+
+ #----------------------------------------------------------------------------
+
+ @functools.lru_cache(maxsize=10000)
+ def _get_array_impl_dropshadow(string, *, font=None, size=32, radius: int, offset_x: int, offset_y: int, line_pad: int=None, **kwargs):
+     assert (offset_x > 0) and (offset_y > 0)
+     pil_font = get_pil_font(font=font, size=size)
+     lines = [pil_font.getmask(line, 'L') for line in string.split('\n')]
+     lines = [np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines]
+     width = max(line.shape[1] for line in lines)
+     lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines]
+     line_spacing = line_pad if line_pad is not None else size // 2
+     lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:]
+     mask = np.concatenate(lines, axis=0)
+     alpha = mask
+
+     mask = np.pad(mask, 2*radius + max(abs(offset_x), abs(offset_y)), mode='constant', constant_values=0)
+     alpha = mask.astype(np.float32) / 255
+     alpha = scipy.ndimage.gaussian_filter(alpha, radius)
+     alpha = 1 - np.maximum(1 - alpha * 1.5, 0) ** 1.4
+     alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8)
+     alpha = np.pad(alpha, [(offset_y, 0), (offset_x, 0)], mode='constant')[:-offset_y, :-offset_x]
+     alpha = np.maximum(alpha, mask)
+     return np.stack([mask, alpha], axis=-1)
+
+ #----------------------------------------------------------------------------
+
+ @functools.lru_cache(maxsize=10000)
+ def get_texture(string, bilinear=True, mipmap=True, **kwargs):
+     return gl_utils.Texture(image=get_array(string, **kwargs), bilinear=bilinear, mipmap=mipmap)
+
+ #----------------------------------------------------------------------------
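Usage sketch (illustration only, not part of the commit): rendering a label with these helpers. Note that get_default_font() downloads Open Sans on first use, and drawing requires a current GL context.

    from gui_utils import text_utils

    # get_texture returns a 2-channel (mask, alpha) gl_utils.Texture.
    label = text_utils.get_texture('Hello\nWorld', size=32, dropshadow_radius=2, max_width=400)
    label.draw(pos=[16, 16], color=[1, 1, 1], alpha=1)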
legacy.py ADDED
@@ -0,0 +1,323 @@
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ #
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ """Converting legacy network pickle into the new format."""
+
+ import click
+ import pickle
+ import re
+ import copy
+ import numpy as np
+ import torch
+ import dnnlib
+ from torch_utils import misc
+
+ #----------------------------------------------------------------------------
+
+ def load_network_pkl(f, force_fp16=False):
+     data = _LegacyUnpickler(f).load()
+
+     # Legacy TensorFlow pickle => convert.
+     if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
+         tf_G, tf_D, tf_Gs = data
+         G = convert_tf_generator(tf_G)
+         D = convert_tf_discriminator(tf_D)
+         G_ema = convert_tf_generator(tf_Gs)
+         data = dict(G=G, D=D, G_ema=G_ema)
+
+     # Add missing fields.
+     if 'training_set_kwargs' not in data:
+         data['training_set_kwargs'] = None
+     if 'augment_pipe' not in data:
+         data['augment_pipe'] = None
+
+     # Validate contents.
+     assert isinstance(data['G'], torch.nn.Module)
+     assert isinstance(data['D'], torch.nn.Module)
+     assert isinstance(data['G_ema'], torch.nn.Module)
+     assert isinstance(data['training_set_kwargs'], (dict, type(None)))
+     assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
+
+     # Force FP16.
+     if force_fp16:
+         for key in ['G', 'D', 'G_ema']:
+             old = data[key]
+             kwargs = copy.deepcopy(old.init_kwargs)
+             fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs)
+             fp16_kwargs.num_fp16_res = 4
+             fp16_kwargs.conv_clamp = 256
+             if kwargs != old.init_kwargs:
+                 new = type(old)(**kwargs).eval().requires_grad_(False)
+                 misc.copy_params_and_buffers(old, new, require_all=True)
+                 data[key] = new
+     return data
+
+ #----------------------------------------------------------------------------
+
+ class _TFNetworkStub(dnnlib.EasyDict):
+     pass
+
+ class _LegacyUnpickler(pickle.Unpickler):
+     def find_class(self, module, name):
+         if module == 'dnnlib.tflib.network' and name == 'Network':
+             return _TFNetworkStub
+         return super().find_class(module, name)
+
+ #----------------------------------------------------------------------------
+
+ def _collect_tf_params(tf_net):
+     # pylint: disable=protected-access
+     tf_params = dict()
+     def recurse(prefix, tf_net):
+         for name, value in tf_net.variables:
+             tf_params[prefix + name] = value
+         for name, comp in tf_net.components.items():
+             recurse(prefix + name + '/', comp)
+     recurse('', tf_net)
+     return tf_params
+
+ #----------------------------------------------------------------------------
+
+ def _populate_module_params(module, *patterns):
+     for name, tensor in misc.named_params_and_buffers(module):
+         found = False
+         value = None
+         for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
+             match = re.fullmatch(pattern, name)
+             if match:
+                 found = True
+                 if value_fn is not None:
+                     value = value_fn(*match.groups())
+                 break
+         try:
+             assert found
+             if value is not None:
+                 tensor.copy_(torch.from_numpy(np.array(value)))
+         except:
+             print(name, list(tensor.shape))
+             raise
+
+ #----------------------------------------------------------------------------
+
+ def convert_tf_generator(tf_G):
+     if tf_G.version < 4:
+         raise ValueError('TensorFlow pickle version too low')
+
+     # Collect kwargs.
+     tf_kwargs = tf_G.static_kwargs
+     known_kwargs = set()
+     def kwarg(tf_name, default=None, none=None):
+         known_kwargs.add(tf_name)
+         val = tf_kwargs.get(tf_name, default)
+         return val if val is not None else none
+
+     # Convert kwargs.
+     from training import networks_stylegan2
+     network_class = networks_stylegan2.Generator
+     kwargs = dnnlib.EasyDict(
+         z_dim           = kwarg('latent_size',     512),
+         c_dim           = kwarg('label_size',      0),
+         w_dim           = kwarg('dlatent_size',    512),
+         img_resolution  = kwarg('resolution',      1024),
+         img_channels    = kwarg('num_channels',    3),
+         channel_base    = kwarg('fmap_base',       16384) * 2,
+         channel_max     = kwarg('fmap_max',        512),
+         num_fp16_res    = kwarg('num_fp16_res',    0),
+         conv_clamp      = kwarg('conv_clamp',      None),
+         architecture    = kwarg('architecture',    'skip'),
+         resample_filter = kwarg('resample_kernel', [1,3,3,1]),
+         use_noise       = kwarg('use_noise',       True),
+         activation      = kwarg('nonlinearity',    'lrelu'),
+         mapping_kwargs = dnnlib.EasyDict(
+             num_layers     = kwarg('mapping_layers',       8),
+             embed_features = kwarg('label_fmaps',          None),
+             layer_features = kwarg('mapping_fmaps',        None),
+             activation     = kwarg('mapping_nonlinearity', 'lrelu'),
+             lr_multiplier  = kwarg('mapping_lrmul',        0.01),
+             w_avg_beta     = kwarg('w_avg_beta',           0.995, none=1),
+         ),
+     )
+
+     # Check for unknown kwargs.
+     kwarg('truncation_psi')
+     kwarg('truncation_cutoff')
+     kwarg('style_mixing_prob')
+     kwarg('structure')
+     kwarg('conditioning')
+     kwarg('fused_modconv')
+     unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
+     if len(unknown_kwargs) > 0:
+         raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
+
+     # Collect params.
+     tf_params = _collect_tf_params(tf_G)
+     for name, value in list(tf_params.items()):
+         match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
+         if match:
+             r = kwargs.img_resolution // (2 ** int(match.group(1)))
+             tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
+             kwargs.synthesis.kwargs.architecture = 'orig'
+     #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
+
+     # Convert params.
+     G = network_class(**kwargs).eval().requires_grad_(False)
+     # pylint: disable=unnecessary-lambda
+     # pylint: disable=f-string-without-interpolation
+     _populate_module_params(G,
+         r'mapping\.w_avg',                           lambda:    tf_params[f'dlatent_avg'],
+         r'mapping\.embed\.weight',                   lambda:    tf_params[f'mapping/LabelEmbed/weight'].transpose(),
+         r'mapping\.embed\.bias',                     lambda:    tf_params[f'mapping/LabelEmbed/bias'],
+         r'mapping\.fc(\d+)\.weight',                 lambda i:  tf_params[f'mapping/Dense{i}/weight'].transpose(),
+         r'mapping\.fc(\d+)\.bias',                   lambda i:  tf_params[f'mapping/Dense{i}/bias'],
+         r'synthesis\.b4\.const',                     lambda:    tf_params[f'synthesis/4x4/Const/const'][0],
+         r'synthesis\.b4\.conv1\.weight',             lambda:    tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
+         r'synthesis\.b4\.conv1\.bias',               lambda:    tf_params[f'synthesis/4x4/Conv/bias'],
+         r'synthesis\.b4\.conv1\.noise_const',        lambda:    tf_params[f'synthesis/noise0'][0, 0],
+         r'synthesis\.b4\.conv1\.noise_strength',     lambda:    tf_params[f'synthesis/4x4/Conv/noise_strength'],
+         r'synthesis\.b4\.conv1\.affine\.weight',     lambda:    tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
+         r'synthesis\.b4\.conv1\.affine\.bias',       lambda:    tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
+         r'synthesis\.b(\d+)\.conv0\.weight',         lambda r:  tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
+         r'synthesis\.b(\d+)\.conv0\.bias',           lambda r:  tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
+         r'synthesis\.b(\d+)\.conv0\.noise_const',    lambda r:  tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
+         r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r:  tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
+         r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r:  tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
+         r'synthesis\.b(\d+)\.conv0\.affine\.bias',   lambda r:  tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
+         r'synthesis\.b(\d+)\.conv1\.weight',         lambda r:  tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
+         r'synthesis\.b(\d+)\.conv1\.bias',           lambda r:  tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
+         r'synthesis\.b(\d+)\.conv1\.noise_const',    lambda r:  tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
+         r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r:  tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
+         r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r:  tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
+         r'synthesis\.b(\d+)\.conv1\.affine\.bias',   lambda r:  tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
+         r'synthesis\.b(\d+)\.torgb\.weight',         lambda r:  tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
+         r'synthesis\.b(\d+)\.torgb\.bias',           lambda r:  tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
+         r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r:  tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
+         r'synthesis\.b(\d+)\.torgb\.affine\.bias',   lambda r:  tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
+         r'synthesis\.b(\d+)\.skip\.weight',          lambda r:  tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
+         r'.*\.resample_filter',                      None,
+         r'.*\.act_filter',                           None,
+     )
+     return G
+
+ #----------------------------------------------------------------------------
+
+ def convert_tf_discriminator(tf_D):
+     if tf_D.version < 4:
+         raise ValueError('TensorFlow pickle version too low')
+
+     # Collect kwargs.
+     tf_kwargs = tf_D.static_kwargs
+     known_kwargs = set()
+     def kwarg(tf_name, default=None):
+         known_kwargs.add(tf_name)
+         return tf_kwargs.get(tf_name, default)
+
+     # Convert kwargs.
+     kwargs = dnnlib.EasyDict(
+         c_dim          = kwarg('label_size',   0),
+         img_resolution = kwarg('resolution',   1024),
+         img_channels   = kwarg('num_channels', 3),
+         architecture   = kwarg('architecture', 'resnet'),
+         channel_base   = kwarg('fmap_base',    16384) * 2,
+         channel_max    = kwarg('fmap_max',     512),
+         num_fp16_res   = kwarg('num_fp16_res', 0),
+         conv_clamp     = kwarg('conv_clamp',   None),
+         cmap_dim       = kwarg('mapping_fmaps', None),
+         block_kwargs = dnnlib.EasyDict(
+             activation      = kwarg('nonlinearity',    'lrelu'),
+             resample_filter = kwarg('resample_kernel', [1,3,3,1]),
+             freeze_layers   = kwarg('freeze_layers',   0),
+         ),
+         mapping_kwargs = dnnlib.EasyDict(
+             num_layers     = kwarg('mapping_layers', 0),
+             embed_features = kwarg('mapping_fmaps',  None),
+             layer_features = kwarg('mapping_fmaps',  None),
+             activation     = kwarg('nonlinearity',   'lrelu'),
+             lr_multiplier  = kwarg('mapping_lrmul',  0.1),
+         ),
+         epilogue_kwargs = dnnlib.EasyDict(
+             mbstd_group_size   = kwarg('mbstd_group_size',   None),
+             mbstd_num_channels = kwarg('mbstd_num_features', 1),
+             activation         = kwarg('nonlinearity',       'lrelu'),
+         ),
+     )
+
+     # Check for unknown kwargs.
+     kwarg('structure')
+     kwarg('conditioning')
+     unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
+     if len(unknown_kwargs) > 0:
+         raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
+
+     # Collect params.
+     tf_params = _collect_tf_params(tf_D)
+     for name, value in list(tf_params.items()):
+         match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
+         if match:
+             r = kwargs.img_resolution // (2 ** int(match.group(1)))
+             tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
+             kwargs.architecture = 'orig'
+     #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
+
+     # Convert params.
+     from training import networks_stylegan2
+     D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False)
+     # pylint: disable=unnecessary-lambda
+     # pylint: disable=f-string-without-interpolation
+     _populate_module_params(D,
+         r'b(\d+)\.fromrgb\.weight',   lambda r:    tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
+         r'b(\d+)\.fromrgb\.bias',     lambda r:    tf_params[f'{r}x{r}/FromRGB/bias'],
+         r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
+         r'b(\d+)\.conv(\d+)\.bias',   lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
+         r'b(\d+)\.skip\.weight',      lambda r:    tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
+         r'mapping\.embed\.weight',    lambda:      tf_params[f'LabelEmbed/weight'].transpose(),
+         r'mapping\.embed\.bias',      lambda:      tf_params[f'LabelEmbed/bias'],
+         r'mapping\.fc(\d+)\.weight',  lambda i:    tf_params[f'Mapping{i}/weight'].transpose(),
+         r'mapping\.fc(\d+)\.bias',    lambda i:    tf_params[f'Mapping{i}/bias'],
+         r'b4\.conv\.weight',          lambda:      tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
+         r'b4\.conv\.bias',            lambda:      tf_params[f'4x4/Conv/bias'],
+         r'b4\.fc\.weight',            lambda:      tf_params[f'4x4/Dense0/weight'].transpose(),
+         r'b4\.fc\.bias',              lambda:      tf_params[f'4x4/Dense0/bias'],
+         r'b4\.out\.weight',           lambda:      tf_params[f'Output/weight'].transpose(),
+         r'b4\.out\.bias',             lambda:      tf_params[f'Output/bias'],
+         r'.*\.resample_filter',       None,
+     )
+     return D
+
+ #----------------------------------------------------------------------------
+
+ @click.command()
+ @click.option('--source', help='Input pickle', required=True, metavar='PATH')
+ @click.option('--dest', help='Output pickle', required=True, metavar='PATH')
+ @click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
+ def convert_network_pickle(source, dest, force_fp16):
+     """Convert legacy network pickle into the native PyTorch format.
+
+     The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
+     It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
+
+     Example:
+
+     \b
+     python legacy.py \\
+         --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
+         --dest=stylegan2-cat-config-f.pkl
+     """
+     print(f'Loading "{source}"...')
+     with dnnlib.util.open_url(source) as f:
+         data = load_network_pkl(f, force_fp16=force_fp16)
+     print(f'Saving "{dest}"...')
+     with open(dest, 'wb') as f:
+         pickle.dump(data, f)
+     print('Done.')
+
+ #----------------------------------------------------------------------------
+
+ if __name__ == "__main__":
+     convert_network_pickle() # pylint: disable=no-value-for-parameter
+
+ #----------------------------------------------------------------------------
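Usage sketch (illustration only, not part of the commit): loading a converted or already-native pickle directly in Python instead of via the CLI, assuming a CUDA device; the checkpoint path is hypothetical.

    import torch
    import dnnlib
    import legacy

    with dnnlib.util.open_url('checkpoints/stylegan2-cat-config-f.pkl') as f:
        G = legacy.load_network_pkl(f)['G_ema'].cuda().eval()  # torch.nn.Module

    z = torch.randn([1, G.z_dim], device='cuda')               # latent code
    c = torch.zeros([1, G.c_dim], device='cuda')               # label (empty when c_dim == 0)
    img = G(z, c, truncation_psi=0.7)                          # NCHW float, roughly [-1, 1]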
requirements.txt ADDED
@@ -0,0 +1,20 @@
+ torch
+ pip
+ numpy>=1.20
+ click>=8.0
+ pillow==8.3.1
+ scipy==1.7.1
+ # Note: pytorch==1.9.1 and cudatoolkit==11.1 are conda package names that pip
+ # cannot resolve; PyTorch is covered by `torch` above and the CUDA toolkit
+ # comes from the conda environment (see environment.yml) or the system.
+ requests==2.26.0
+ tqdm==4.62.2
+ ninja==1.10.2
+ matplotlib==3.4.2
+ imageio==2.9.0
+ imgui==1.3.0
+ glfw==2.2.0
+ pyopengl==3.1.5
+ imageio-ffmpeg==0.4.3
+ pyspng
+ gradio
scripts/download_model.sh ADDED
@@ -0,0 +1,19 @@
+ mkdir -p checkpoints  # -p: do not fail if the directory already exists
+ cd checkpoints
+
+ wget https://storage.googleapis.com/self-distilled-stylegan/lions_512_pytorch.pkl
+ mv lions_512_pytorch.pkl stylegan2_lions_512_pytorch.pkl
+
+ wget https://storage.googleapis.com/self-distilled-stylegan/dogs_1024_pytorch.pkl
+ mv dogs_1024_pytorch.pkl stylegan2_dogs_1024_pytorch.pkl
+
+ wget https://storage.googleapis.com/self-distilled-stylegan/horses_256_pytorch.pkl
+ mv horses_256_pytorch.pkl stylegan2_horses_256_pytorch.pkl
+
+ wget https://storage.googleapis.com/self-distilled-stylegan/elephants_512_pytorch.pkl
+ mv elephants_512_pytorch.pkl stylegan2_elephants_512_pytorch.pkl
+
+ wget https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-ffhq-512x512.pkl
+ wget https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan2/versions/1/files/stylegan2-afhqcat-512x512.pkl
+ wget http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-car-config-f.pkl
+ wget http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-cat-config-f.pkl
scripts/gui.sh ADDED
@@ -0,0 +1,11 @@
+ python visualizer_drag.py \
+     checkpoints/stylegan2_lions_512_pytorch.pkl \
+     checkpoints/stylegan2-ffhq-512x512.pkl \
+     checkpoints/stylegan2-afhqcat-512x512.pkl \
+     checkpoints/stylegan2-car-config-f.pkl \
+     checkpoints/stylegan2_dogs_1024_pytorch.pkl \
+     checkpoints/stylegan2_horses_256_pytorch.pkl \
+     checkpoints/stylegan2-cat-config-f.pkl \
+     checkpoints/stylegan2_elephants_512_pytorch.pkl \
+     checkpoints/stylegan_human_v2_512.pkl \
+     checkpoints/stylegan2-lhq-256x256.pkl
stylegan_human/.gitignore ADDED
@@ -0,0 +1,10 @@
+ .DS_Store
+ __pycache__
+ *.pt
+ *.pth
+ *.pdparams
+ *.pdiparams
+ *.pdmodel
+ *.pkl
+ *.info
+ *.yaml
stylegan_human/PP_HumanSeg/deploy/infer.py ADDED
@@ -0,0 +1,180 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+
4
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import codecs
19
+ import os
20
+ import time
21
+
22
+ import yaml
23
+ import numpy as np
24
+ import cv2
25
+ import paddle
26
+ import paddleseg.transforms as T
27
+ from paddle.inference import create_predictor, PrecisionType
28
+ from paddle.inference import Config as PredictConfig
29
+ from paddleseg.core.infer import reverse_transform
30
+ from paddleseg.cvlibs import manager
31
+ from paddleseg.utils import TimeAverager
32
+
33
+ from ..scripts.optic_flow_process import optic_flow_process
34
+
35
+
36
+ class DeployConfig:
37
+ def __init__(self, path):
38
+ with codecs.open(path, 'r', 'utf-8') as file:
39
+ self.dic = yaml.load(file, Loader=yaml.FullLoader)
40
+
41
+ self._transforms = self._load_transforms(self.dic['Deploy'][
42
+ 'transforms'])
43
+ self._dir = os.path.dirname(path)
44
+
45
+ @property
46
+ def transforms(self):
47
+ return self._transforms
48
+
49
+ @property
50
+ def model(self):
51
+ return os.path.join(self._dir, self.dic['Deploy']['model'])
52
+
53
+ @property
54
+ def params(self):
55
+ return os.path.join(self._dir, self.dic['Deploy']['params'])
56
+
57
+ def _load_transforms(self, t_list):
58
+ com = manager.TRANSFORMS
59
+ transforms = []
60
+ for t in t_list:
61
+ ctype = t.pop('type')
62
+ transforms.append(com[ctype](**t))
63
+
64
+ return transforms
65
+
66
+
67
+ class Predictor:
68
+ def __init__(self, args):
69
+ self.cfg = DeployConfig(args.cfg)
70
+ self.args = args
71
+ self.compose = T.Compose(self.cfg.transforms)
72
+ resize_h, resize_w = args.input_shape
73
+
74
+ self.disflow = cv2.DISOpticalFlow_create(
75
+ cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
76
+ self.prev_gray = np.zeros((resize_h, resize_w), np.uint8)
77
+ self.prev_cfd = np.zeros((resize_h, resize_w), np.float32)
78
+ self.is_init = True
79
+
80
+ pred_cfg = PredictConfig(self.cfg.model, self.cfg.params)
81
+ pred_cfg.disable_glog_info()
82
+ if self.args.use_gpu:
83
+ pred_cfg.enable_use_gpu(100, 0)
84
+
85
+ self.predictor = create_predictor(pred_cfg)
86
+ if self.args.test_speed:
87
+ self.cost_averager = TimeAverager()
88
+
89
+ def preprocess(self, img):
90
+ ori_shapes = []
91
+ processed_imgs = []
92
+ processed_img = self.compose(img)[0]
93
+ processed_imgs.append(processed_img)
94
+ ori_shapes.append(img.shape)
95
+ return processed_imgs, ori_shapes
96
+
97
+ def run(self, img, bg):
98
+ input_names = self.predictor.get_input_names()
99
+ input_handle = self.predictor.get_input_handle(input_names[0])
100
+ processed_imgs, ori_shapes = self.preprocess(img)
101
+ data = np.array(processed_imgs)
102
+ input_handle.reshape(data.shape)
103
+ input_handle.copy_from_cpu(data)
104
+ if self.args.test_speed:
105
+ start = time.time()
106
+
107
+ self.predictor.run()
108
+
109
+ if self.args.test_speed:
110
+ self.cost_averager.record(time.time() - start)
111
+ output_names = self.predictor.get_output_names()
112
+ output_handle = self.predictor.get_output_handle(output_names[0])
113
+ output = output_handle.copy_to_cpu()
114
+ return self.postprocess(output, img, ori_shapes[0], bg)
115
+
116
+
117
+ def postprocess(self, pred, img, ori_shape, bg):
118
+ if not os.path.exists(self.args.save_dir):
119
+ os.makedirs(self.args.save_dir)
120
+ resize_w = pred.shape[-1]
121
+ resize_h = pred.shape[-2]
122
+ if self.args.soft_predict:
123
+ if self.args.use_optic_flow:
124
+ score_map = pred[:, 1, :, :].squeeze(0)
125
+ score_map = 255 * score_map
126
+ cur_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
127
+ cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))
128
+ optflow_map = optic_flow_process(cur_gray, score_map, self.prev_gray, self.prev_cfd, \
129
+ self.disflow, self.is_init)
130
+ self.prev_gray = cur_gray.copy()
131
+ self.prev_cfd = optflow_map.copy()
132
+ self.is_init = False
133
+
134
+ score_map = np.repeat(optflow_map[:, :, np.newaxis], 3, axis=2)
135
+ score_map = np.transpose(score_map, [2, 0, 1])[np.newaxis, ...]
136
+ score_map = reverse_transform(
137
+ paddle.to_tensor(score_map),
138
+ ori_shape,
139
+ self.cfg.transforms,
140
+ mode='bilinear')
141
+ alpha = np.transpose(score_map.numpy().squeeze(0),
142
+ [1, 2, 0]) / 255
143
+ else:
144
+ score_map = pred[:, 1, :, :]
145
+ score_map = score_map[np.newaxis, ...]
146
+ score_map = reverse_transform(
147
+ paddle.to_tensor(score_map),
148
+ ori_shape,
149
+ self.cfg.transforms,
150
+ mode='bilinear')
151
+ alpha = np.transpose(score_map.numpy().squeeze(0), [1, 2, 0])
152
+
153
+ else:
154
+ if pred.ndim == 3:
155
+ pred = pred[:, np.newaxis, ...]
156
+ result = reverse_transform(
157
+ paddle.to_tensor(
158
+ pred, dtype='float32'),
159
+ ori_shape,
160
+ self.cfg.transforms,
161
+ mode='bilinear')
162
+
163
+ result = np.array(result)
164
+ if self.args.add_argmax:
165
+ result = np.argmax(result, axis=1)
166
+ else:
167
+ result = result.squeeze(1)
168
+ alpha = np.transpose(result, [1, 2, 0])
169
+
170
+ # background replace
171
+ h, w, _ = img.shape
172
+ if bg is None:
173
+ bg = np.ones_like(img)*255
174
+ else:
175
+ bg = cv2.resize(bg, (w, h))
176
+ if bg.ndim == 2:
177
+ bg = bg[..., np.newaxis]
178
+
179
+ comb = (alpha * img + (1 - alpha) * bg).astype(np.uint8)
180
+ return comb, alpha, bg, img
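+
+
+ # Usage sketch (illustrative, not part of the original file): these are the
+ # same arguments that stylegan_human/alignment.py passes when it drives this
+ # Predictor; the image path here is an assumption for illustration.
+ #
+ #     args = argparse.Namespace(
+ #         cfg='PP_HumanSeg/export_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax/deploy.yaml',
+ #         input_shape=[1024, 512], save_dir='results/', soft_predict=False,
+ #         use_gpu=True, test_speed=False, use_optic_flow=False, add_argmax=True)
+ #     predictor = Predictor(args)
+ #     comb, alpha, bg, ori_img = predictor.run(cv2.imread('person.png'), None)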
stylegan_human/PP_HumanSeg/export_model/download_export_model.py ADDED
@@ -0,0 +1,44 @@
1
+ # coding: utf8
2
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import sys
17
+ import os
18
+
19
+ LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
20
+ TEST_PATH = os.path.join(LOCAL_PATH, "../../../", "test")
21
+ sys.path.append(TEST_PATH)
22
+
23
+ from paddleseg.utils.download import download_file_and_uncompress
24
+
25
+ model_urls = {
26
+ "pphumanseg_lite_portrait_398x224_with_softmax":
27
+ "https://paddleseg.bj.bcebos.com/dygraph/ppseg/ppseg_lite_portrait_398x224_with_softmax.tar.gz",
28
+ "deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax":
29
+ "https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax.zip",
30
+ "fcn_hrnetw18_small_v1_humanseg_192x192_with_softmax":
31
+ "https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/fcn_hrnetw18_small_v1_humanseg_192x192_with_softmax.zip",
32
+ "pphumanseg_lite_generic_humanseg_192x192_with_softmax":
33
+ "https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/pphumanseg_lite_generic_192x192_with_softmax.zip",
34
+ }
35
+
36
+ if __name__ == "__main__":
37
+ for model_name, url in model_urls.items():
38
+ download_file_and_uncompress(
39
+ url=url,
40
+ savepath=LOCAL_PATH,
41
+ extrapath=LOCAL_PATH,
42
+ extraname=model_name)
43
+
44
+ print("Export model download success!")
stylegan_human/PP_HumanSeg/pretrained_model/download_pretrained_model.py ADDED
@@ -0,0 +1,44 @@
1
+ # coding: utf8
2
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import sys
17
+ import os
18
+
19
+ LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
20
+ TEST_PATH = os.path.join(LOCAL_PATH, "../../../", "test")
21
+ sys.path.append(TEST_PATH)
22
+
23
+ from paddleseg.utils.download import download_file_and_uncompress
24
+
25
+ model_urls = {
26
+ "pphumanseg_lite_portrait_398x224":
27
+ "https://paddleseg.bj.bcebos.com/dygraph/ppseg/ppseg_lite_portrait_398x224.tar.gz",
28
+ "deeplabv3p_resnet50_os8_humanseg_512x512_100k":
29
+ "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/deeplabv3p_resnet50_os8_humanseg_512x512_100k.zip",
30
+ "fcn_hrnetw18_small_v1_humanseg_192x192":
31
+ "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/fcn_hrnetw18_small_v1_humanseg_192x192.zip",
32
+ "pphumanseg_lite_generic_human_192x192":
33
+ "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/pphumanseg_lite_generic_192x192.zip",
34
+ }
35
+
36
+ if __name__ == "__main__":
37
+ for model_name, url in model_urls.items():
38
+ download_file_and_uncompress(
39
+ url=url,
40
+ savepath=LOCAL_PATH,
41
+ extrapath=LOCAL_PATH,
42
+ extraname=model_name)
43
+
44
+ print("Pretrained model download success!")
stylegan_human/README.md ADDED
@@ -0,0 +1,229 @@
1
+ # StyleGAN-Human: A Data-Centric Odyssey of Human Generation
2
+ <img src="./img/demo_V5_thumbnails-min.png" width="96%" height="96%">
3
+
4
8
+
9
+ >
10
+ >
11
+ > **Abstract:** *Unconditional human image generation is an important task in vision and graphics, which enables various applications in the creative industry. Existing studies in this field mainly focus on "network engineering" such as designing new components and objective functions. This work takes a data-centric perspective and investigates multiple critical aspects in "data engineering", which we believe would complement the current practice. To facilitate a comprehensive study, we collect and annotate a large-scale human image dataset with over 230K samples capturing diverse poses and textures. Equipped with this large dataset, we rigorously investigate three essential factors in data engineering for StyleGAN-based human generation, namely data size, data distribution, and data alignment. Extensive experiments reveal several valuable observations w.r.t. these aspects: 1) Large-scale data, more than 40K images, are needed to train a high-fidelity unconditional human generation model with vanilla StyleGAN. 2) A balanced training set helps improve the generation quality with rare face poses compared to the long-tailed counterpart, whereas simply balancing the clothing texture distribution does not effectively bring an improvement. 3) Human GAN models with body centers for alignment outperform models trained using face centers or pelvis points as alignment anchors. In addition, a model zoo and human editing applications are demonstrated to facilitate future research in the community.* <br>
12
+ **Keywords:** Human Image Generation, Data-Centric, StyleGAN
13
+
14
+ [Jianglin Fu](mailto:[email protected]), [Shikai Li](mailto:[email protected]), [Yuming Jiang](https://yumingj.github.io/), [Kwan-Yee Lin](https://kwanyeelin.github.io/), [Chen Qian](https://scholar.google.com/citations?user=AerkT0YAAAAJ&hl=zh-CN), [Chen Change Loy](https://www.mmlab-ntu.com/person/ccloy/), [Wayne Wu](https://wywu.github.io/), and [Ziwei Liu](https://liuziwei7.github.io/) <br>
15
+ **[[Demo Video]](https://youtu.be/nIrb9hwsdcI)** | **[[Project Page]](https://stylegan-human.github.io/)** | **[[Paper]](https://arxiv.org/pdf/2204.11823.pdf)**
16
+
17
+ ## Updates
18
+ - [20/07/2022] [SHHQ-1.0](./docs/Dataset.md) dataset with 40K images is released! :sparkles:
19
+ - [15/06/2022] Data alignment and real-image inversion scripts are released.
20
+ - [26/04/2022] Technical report released!
21
+ - [22/04/2022] Technical report will be released before May.
22
+ - [21/04/2022] The codebase and project page are created.
23
+
24
+ ## Data Download
25
+ The first version, SHHQ-1.0, with 40K images, is released. To download and use the dataset, please read the instructions in [Dataset.md](./docs/Dataset.md).
26
+
27
+ (We are currently receiving a large number of applications and need to verify each applicant carefully. Please be patient; we will reply to you as soon as possible.)
28
+
29
+ ## Model Zoo
30
+
31
+ | Structure | 1024x512 | Metric | Scores | 512x256 | Metric | Scores |
32
+ | --------- |:----------:| :----------:| :----------:| :-----: | :-----: | :-----: |
33
+ | StyleGAN1 |[stylegan_human_v1_1024.pkl](https://drive.google.com/file/d/1h-R-IV-INGdPEzj4P9ml6JTEvihuNgLX/view?usp=sharing)| fid50k | 3.79 | to be released | - | - |
34
+ | StyleGAN2 |[stylegan_human_v2_1024.pkl](https://drive.google.com/file/d/1FlAb1rYa0r_--Zj_ML8e6shmaF28hQb5/view?usp=sharing)| fid50k_full | 1.57 |[stylegan_human_v2_512.pkl](https://drive.google.com/file/d/1dlFEHbu-WzQWJl7nBBZYcTyo000H9hVm/view?usp=sharing) | fid50k_full | 1.97 |
35
+ | StyleGAN3 |to be released | - | - | [stylegan_human_v3_512.pkl](https://drive.google.com/file/d/1_274jk_N6WSCkKWeu7hjHycqGvbuOFf5/view?usp=sharing) | fid50k_full | 2.54 |
36
+
37
+
38
+
39
+ ## Web Demo
40
+
41
+ Integrated into [Huggingface Spaces πŸ€—](https://huggingface.co/spaces) using [Gradio](https://github.com/gradio-app/gradio). Try out the Web Demo for generation: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/hysts/StyleGAN-Human) and interpolation [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/hysts/StyleGAN-Human-Interpolation)
42
+
43
+
44
+
45
+ <a href="https://colab.research.google.com/drive/1sgxoDM55iM07FS54vz9ALg1XckiYA2On"><img src="https://colab.research.google.com/assets/colab-badge.svg" height=22.5></a>
46
+
47
+ We provide a Colab demo that allows you to synthesize images with the provided models, as well as visualize the performance of style mixing, interpolation, and attribute editing.
48
+ The notebook will guide you to install the necessary environment and download pretrained models. The output images can be found in `./StyleGAN-Human/outputs/`.
49
+ Hope you enjoy!
50
+
51
+ ## Usage
52
+
53
+ ### System requirements
54
+ * The original code bases are [stylegan (tensorflow)](https://github.com/NVlabs/stylegan), [stylegan2-ada (pytorch)](https://github.com/NVlabs/stylegan2-ada-pytorch), and [stylegan3 (pytorch)](https://github.com/NVlabs/stylegan3), released by NVIDIA.
55
+
56
+ * We tested with Python 3.8.5 and PyTorch 1.9.1 with CUDA 11.1. (See https://pytorch.org for PyTorch install instructions.)
57
+
58
+ ### Installation
59
+ To work with this project on your own machine, you need to install the environment as follows:
60
+
61
+ ```
62
+ conda env create -f environment.yml
63
+ conda activate stylehuman
64
+ # [Optional: tensorflow 1.x is required for StyleGAN1. ]
65
+ pip install nvidia-pyindex
66
+ pip install nvidia-tensorflow[horovod]
67
+ pip install nvidia-tensorboard==1.15
68
+ ```
69
+ Extra notes:
70
+ 1. If you run into conflicts between CUDA versions, try emptying `LD_LIBRARY_PATH`. For example:
71
+ ```
72
+ LD_LIBRARY_PATH=; python generate.py --outdir=out/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7
73
+ --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
74
+ ```
75
+
76
+
77
+ 2. We found the following troubleshooting links might be helpful: [1.](https://github.com/NVlabs/stylegan3), [2.](https://github.com/NVlabs/stylegan3/blob/main/docs/troubleshooting.md)
78
+
79
+ ### Train
80
+ The training scripts are based on the original [stylegan1](https://github.com/NVlabs/stylegan), [stylegan2-ada](https://github.com/NVlabs/stylegan2-ada-pytorch), and [stylegan3](https://github.com/NVlabs/stylegan3), with minor changes. Here we only provide the modified scripts for SG2 and SG3; replace the corresponding original files with them to train (assuming SHHQ-1.0 is placed under data/).
81
+
82
+ #### Train Stylegan2-ada-pytorch with SHHQ-1.0
83
+ ```
84
+ python train.py --outdir=training_results/sg2/ --data=data/SHHQ-1.0/ \
85
+ --gpus=8 --aug=noaug --mirror=1 --snap=250 --cfg=shhq --square=False
86
+ ```
87
+ #### Train Stylegan3 with SHHQ-1.0
88
+ ```
89
+ python train.py --outdir=training_results/sg3/ --cfg=stylegan3-r --gpus=8 --batch=32 --gamma=12.4 \
90
+ --mirror=1 --aug=noaug --data=data/SHHQ-1.0/ --square=False --snap=250
91
+ ```
92
+
93
+ ### Pretrained models
94
+ Please put the pretrained models downloaded from the [Model Zoo](#Model-Zoo) above under the folder 'pretrained_models'.
95
+
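+ A minimal loading sketch, following the convention of the stylegan2-ada-pytorch
+ codebase this repo builds on (run from the repo root so that `dnnlib`/`torch_utils`
+ resolve during unpickling; the loaders used by the bundled scripts may differ):
+ ```python
+ import pickle
+ import torch
+
+ with open('pretrained_models/stylegan_human_v2_1024.pkl', 'rb') as f:
+     G = pickle.load(f)['G_ema'].cuda()  # generator as a torch.nn.Module
+ z = torch.randn([1, G.z_dim]).cuda()    # random latent code
+ img = G(z, None)                        # NCHW float image in [-1, 1]
+ ```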
96
+
97
+ ### Generate full-body human images using our pretrained model
98
+ ```
99
+ # Generate human full-body images without truncation
100
+ python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
101
+
102
+ # Generate human full-body images with truncation
103
+ python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=0.8 --seeds=0-10 --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
104
+
105
+ # Generate human full-body images using stylegan V1
106
+ python generate.py --outdir=outputs/generate/stylegan_human_v1_1024 --network=pretrained_models/stylegan_human_v1_1024.pkl --version 1 --seeds=1,3,5
107
+
108
+ # Generate human full-body images using stylegan V3
109
+ python generate.py --outdir=outputs/generate/stylegan_human_v3_512 --network=pretrained_models/stylegan_human_v3_512.pkl --version 3 --seeds=1,3,5
110
+ ```
111
+
112
+
113
+ #### Note: The following demos are generated with the StyleGAN V2 models (stylegan_human_v2_512.pkl and stylegan_human_v2_1024.pkl). To reproduce them with V1 or V3, you need to change how the corresponding models are loaded.
114
+
115
+
116
+ ### Interpolation
117
+ ```
118
+ python interpolation.py --network=pretrained_models/stylegan_human_v2_1024.pkl --seeds=85,100 --outdir=outputs/inter_gifs
119
+ ```
120
+
121
+ ### Style-mixing **image** using stylegan2
122
+ ```
123
+ python style_mixing.py --network=pretrained_models/stylegan_human_v2_1024.pkl --rows=85,100,75,458,1500 \\
124
+ --cols=55,821,1789,293 --styles=0-3 --outdir=outputs/stylemixing
125
+ ```
126
+
127
+ ### Style-mixing **video** using stylegan2
128
+ ```
129
+ python stylemixing_video.py --network=pretrained_models/stylegan_human_v2_1024.pkl --row-seed=3859 \\
130
+ --col-seeds=3098,31759,3791 --col-styles=8-12 --trunc=0.8 --outdir=outputs/stylemixing_video
131
+ ```
132
+
133
+ ### Aligned raw images
134
+ For alignment, we use [openpose-pytorch](https://github.com/Hzzone/pytorch-openpose) for body-keypoints detection and [PaddlePaddle](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.5/contrib/PP-HumanSeg) for human segmentation.
135
+ Before running the alignment script, a few models need to be downloaded (a download sketch follows the list):
136
+ 1. download [body_pose_model.pth](https://drive.google.com/drive/folders/1JsvI4M4ZTg98fmnCZLFM-3TeovnCRElG?usp=sharing) and place it into openpose/model/.
137
+ 2. download and extract [deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax](https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax.zip) into PP_HumanSeg/export_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax.
138
+ 3. download and extract [deeplabv3p_resnet50_os8_humanseg_512x512_100k](https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/deeplabv3p_resnet50_os8_humanseg_512x512_100k.zip) into PP_HumanSeg/pretrained_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k.
139
+ 4. install PaddleSeg: ``` pip install paddleseg ```
140
+
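+ As a sketch, the downloads in steps 2 and 3 can also be scripted with the same
+ helper used by the bundled download scripts (shown for the export model of step
+ 2; the URL is the one listed above):
+ ```python
+ from paddleseg.utils.download import download_file_and_uncompress
+
+ name = 'deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax'
+ download_file_and_uncompress(
+     url=f'https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/{name}.zip',
+     savepath='PP_HumanSeg/export_model',
+     extrapath='PP_HumanSeg/export_model',
+     extraname=name)
+ ```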
141
+ Then you can start alignment:
142
+ ```
143
+ python alignment.py --image-folder img/test/ --output-folder aligned_image/
144
+ ```
145
+
146
+ ### Invert real image with [PTI](https://github.com/danielroich/PTI)
147
+ Before inversion, please download our PTI weights: [e4e_w+.pt](https://drive.google.com/file/d/1NUfSJqLhsrU7c9PwAtlZ9xtrxhzS_6tu/view?usp=sharing) into /pti/.
148
+
149
+ A few parameters you can change (an illustrative configuration follows the list):
150
+ - /pti/pti_configs/hyperparameters.py:
151
+ - first_inv_type = 'w+' -> Use pretrained e4e encoder
152
+ - first_inv_type = 'w' -> Use projection and optimization
153
+ - /pti/pti_configs/paths_config.py:
154
+ - input_data_path: path of real images
155
+ - e4e: path of e4e_w+.pt
156
+ - stylegan2_ada_shhq: pretrained stylegan2-ada model for SHHQ
157
+
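+ An illustrative `paths_config.py` under the setup above (values are examples,
+ not the defaults shipped with the repo):
+ ```python
+ # /pti/pti_configs/paths_config.py -- illustrative values only
+ input_data_path = 'aligned_image/'  # real images, e.g. the output of alignment.py
+ e4e = './pti/e4e_w+.pt'             # the downloaded PTI e4e weights
+ stylegan2_ada_shhq = './pretrained_models/stylegan_human_v2_1024.pkl'  # SHHQ generator
+ ```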
158
+ ```
159
+ python run_pti.py
160
+ ```
161
+ Note: we use the test image under 'aligned_image/' (the output of alignment.py); the inverted latent code and fine-tuned generator will be saved in 'outputs/pti/'.
162
+
163
+
164
+ ### Editing with InterfaceGAN, StyleSpace, and Sefa
165
+ ```
166
+ python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\
167
+ --seeds 61531,61570,61571,61610 --outdir outputs/edit_results
168
+ ```
169
+
170
+ ### Editing using inverted latent code
171
+ ```
172
+ python edit.py --network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\
173
+ --outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png
174
+ ```
175
+
176
+ Note:
177
+ 1. "upper_length" and "bottom_length" are the available options for "attr_name" in this demo.
178
+ 2. Layers to control and editing strength are set in edit/edit_config.py.
179
+
180
+
181
+ ### Demo for [InsetGAN](https://arxiv.org/abs/2203.07293)
182
+
183
+ We implement a quick demo using the key idea from InsetGAN: combining the face generated by FFHQ with the human body generated by our pretrained model, and optimizing both face and body latent codes to obtain a coherent full-body image.
184
+ Before running the script, you need to download the [FFHQ face model](https://docs.google.com/uc?export=download&confirm=t&id=125OG7SMkXI-Kf2aqiwLLHyCvSW-gZk3M) (or use your own face model), as well as the [pretrained face landmark model](https://docs.google.com/uc?export=download&confirm=&id=1A82DnJBJzt8wI2J8ZrCK5fgHcQ2-tcWM) and the [pretrained CNN face detection model for dlib](https://docs.google.com/uc?export=download&confirm=&id=1MduBgju5KFNrQfDLoQXJ_1_h5MnctCIG).
185
+ ```
186
+ python insetgan.py --body_network=pretrained_models/stylegan_human_v2_1024.pkl --face_network=pretrained_models/ffhq.pkl \\
187
+ --body_seed=82 --face_seed=43 --trunc=0.6 --outdir=outputs/insetgan/ --video 1
188
+ ```
189
+
190
+ ## Results
191
+
192
+ ### Editing with inverted real image
193
+ (from left to right: real image | inverted image | InterFaceGAN result | StyleSpace result | SeFa result)
194
+
195
+ https://user-images.githubusercontent.com/98547009/173773800-bb7fe54a-84d3-4b30-9864-a6b7b311f8ff.mp4
196
+
197
+
198
+ ### For more demos, please visit our [**web page**](https://stylegan-human.github.io/).
199
+
200
+
201
+ ## TODO List
202
+
203
+ - [ ] Release 1024x512 version of StyleGAN-Human based on StyleGAN3
204
+ - [ ] Release 512x256 version of StyleGAN-Human based on StyleGAN1
205
+ - [ ] Extension of downstream application (InsetGAN): Add a face inversion interface to support fusing a user's face image with a StyleGAN-Human body image
206
+ - [x] Add Inversion Script into the provided editing pipeline
207
+ - [ ] Release Dataset
208
+
209
+
210
+ ## Related Works
211
+ * (SIGGRAPH 2022) **Text2Human: Text-Driven Controllable Human Image Generation**, Yuming Jiang et al. [[Paper](https://arxiv.org/pdf/2205.15996.pdf)], [[Code](https://github.com/yumingj/Text2Human)], [[Project Page](https://yumingj.github.io/projects/Text2Human.html)], [[Dataset](https://github.com/yumingj/DeepFashion-MultiModal)]
212
+ * (ICCV 2021) **Talk-to-Edit: Fine-Grained Facial Editing via Dialog**, Yuming Jiang et al. [[Paper](https://arxiv.org/abs/2109.04425)], [[Code](https://github.com/yumingj/Talk-to-Edit)], [[Project Page](https://www.mmlab-ntu.com/project/talkedit/)], [[Dataset](https://mmlab.ie.cuhk.edu.hk/projects/CelebA/CelebA_Dialog.html)]
213
+ * (Technical Report 2022) **Generalizable Neural Performer: Learning Robust Radiance Fields for Human Novel View Synthesis**, Wei Cheng et al. [[Paper](https://arxiv.org/pdf/2204.11798.pdf)], [[Code](https://github.com/generalizable-neural-performer/gnr)], [[Project Page](https://generalizable-neural-performer.github.io/)], [[Dataset](https://generalizable-neural-performer.github.io/genebody.html)]
214
+
215
+ ## Citation
216
+
217
+ If you find this work useful for your research, please consider citing our paper:
218
+
219
+ ```bibtex
220
+ @article{fu2022styleganhuman,
221
+ title={StyleGAN-Human: A Data-Centric Odyssey of Human Generation},
222
+ author={Fu, Jianglin and Li, Shikai and Jiang, Yuming and Lin, Kwan-Yee and Qian, Chen and Loy, Chen-Change and Wu, Wayne and Liu, Ziwei},
223
+ journal = {arXiv preprint},
224
+ volume = {arXiv:2204.11823},
225
+ year = {2022}
+ }
226
+ ```
227
+
228
+ ## Acknowledgement
229
+ Part of the code is borrowed from [stylegan (tensorflow)](https://github.com/NVlabs/stylegan), [stylegan2-ada (pytorch)](https://github.com/NVlabs/stylegan2-ada-pytorch), [stylegan3 (pytorch)](https://github.com/NVlabs/stylegan3).
stylegan_human/__init__.py ADDED
File without changes
stylegan_human/alignment.py ADDED
@@ -0,0 +1,223 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+
4
+ import os
5
+ import argparse
6
+ import numpy as np
7
+ import torch
8
+ from torch.utils.data import DataLoader
9
+ from torchvision.transforms import transforms
10
+ from utils.ImagesDataset import ImagesDataset
11
+
12
+ import cv2
13
+ import time
14
+ import copy
15
+ import imutils
16
+
17
+ # for openpose body keypoint detector : # (src:https://github.com/Hzzone/pytorch-openpose)
18
+ from openpose.src import util
19
+ from openpose.src.body import Body
20
+
21
+ # for paddlepaddle human segmentation : #(src: https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.5/contrib/PP-HumanSeg/)
22
+ from PP_HumanSeg.deploy.infer import Predictor as PP_HumenSeg_Predictor
23
+
24
+ import math
25
+ # Angle at p1 (in degrees) between the segments p1->p0 and p1->p2, computed
+ # via the law of cosines on squared distances; returns -1 if undefined.
+ def angle_between_points(p0, p1, p2):
26
+ if p0[1] == -1 or p1[1] == -1 or p2[1] == -1: # a y of -1 marks a missing keypoint
27
+ return -1
28
+ a = (p1[0]-p0[0])**2 + (p1[1]-p0[1])**2
29
+ b = (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2
30
+ c = (p2[0]-p0[0])**2 + (p2[1]-p0[1])**2
31
+ if a * b == 0:
32
+ return -1
33
+ return math.acos((a+b-c) / math.sqrt(4*a*b)) * 180 / math.pi
34
+
35
+
36
+ def crop_img_with_padding(img, keypoints, rect):
37
+ person_xmin,person_xmax, ymin, ymax= rect
38
+ img_h,img_w,_ = img.shape ## find body center using keypoints
39
+ middle_shoulder_x = keypoints[1][0]
40
+ middle_hip_x = (keypoints[8][0] + keypoints[11][0]) // 2
41
+ mid_x = (middle_hip_x + middle_shoulder_x) // 2
42
+ mid_y = (ymin + ymax) // 2
43
+ ## find which side (l or r) is further than center x, use the further side
44
+ if abs(mid_x-person_xmin) > abs(person_xmax-mid_x): #left further
45
+ xmin = person_xmin
46
+ xmax = mid_x + (mid_x-person_xmin)
47
+ else:
48
+ ############### xmin may be negative here
49
+ ### in this case, the script won't output any image; leave the case as is,
50
+ ### since we don't want to pad the human body
51
+ xmin = mid_x - (person_xmax-mid_x)
52
+ xmax = person_xmax
53
+
54
+ w = xmax - xmin
55
+ h = ymax - ymin
56
+ ## pad rectangle to w:h = 1:2 ## calculate desired border length
57
+ if h / w >= 2: #pad horizontally
58
+ target_w = h // 2
59
+ xmin_prime = int(mid_x - target_w / 2)
60
+ xmax_prime = int(mid_x + target_w / 2)
61
+ if xmin_prime < 0:
62
+ pad_left = abs(xmin_prime)# - xmin
63
+ xmin = 0
64
+ else:
65
+ pad_left = 0
66
+ xmin = xmin_prime
67
+ if xmax_prime > img_w:
68
+ pad_right = xmax_prime - img_w
69
+ xmax = img_w
70
+ else:
71
+ pad_right = 0
72
+ xmax = xmax_prime
73
+
74
+ cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
75
+ im_pad = cv2.copyMakeBorder(cropped_img, 0, 0, int(pad_left), int(pad_right), cv2.BORDER_REPLICATE)
76
+ else: #pad vertically
77
+ target_h = w * 2
78
+ ymin_prime = mid_y - (target_h / 2)
79
+ ymax_prime = mid_y + (target_h / 2)
80
+ if ymin_prime < 0:
81
+ pad_up = abs(ymin_prime)# - ymin
82
+ ymin = 0
83
+ else:
84
+ pad_up = 0
85
+ ymin = ymin_prime
86
+ if ymax_prime > img_h:
87
+ pad_down = ymax_prime - img_h
88
+ ymax = img_h
89
+ else:
90
+ pad_down = 0
91
+ ymax = ymax_prime
92
+ print(ymin,ymax, xmin,xmax, img.shape)
93
+
94
+ cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
95
+ im_pad = cv2.copyMakeBorder(cropped_img, int(pad_up), int(pad_down), 0,
96
+ 0, cv2.BORDER_REPLICATE)
97
+ result = cv2.resize(im_pad,(512,1024),interpolation = cv2.INTER_AREA)
98
+ return result
99
+
100
+
101
+ def run(args):
102
+ os.makedirs(args.output_folder, exist_ok=True)
103
+ dataset = ImagesDataset(args.image_folder, transforms.Compose([transforms.ToTensor()]))
104
+ dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
105
+
106
+ body_estimation = Body('openpose/model/body_pose_model.pth')
107
+
108
+ total = len(dataloader)
109
+ print('Num of dataloader : ', total)
110
+ os.makedirs(f'{args.output_folder}', exist_ok=True)
111
+ # os.makedirs(f'{args.output_folder}/middle_result', exist_ok=True)
112
+
113
+ ## initialize HumanSeg
114
+ human_seg_args = {}
115
+ human_seg_args['cfg'] = 'PP_HumanSeg/export_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax/deploy.yaml'
116
+ human_seg_args['input_shape'] = [1024,512]
117
+ human_seg_args['save_dir'] = args.output_folder
118
+ human_seg_args['soft_predict'] = False
119
+ human_seg_args['use_gpu'] = True
120
+ human_seg_args['test_speed'] = False
121
+ human_seg_args['use_optic_flow'] = False
122
+ human_seg_args['add_argmax'] = True
123
+ human_seg_args= argparse.Namespace(**human_seg_args)
124
+ human_seg = PP_HumenSeg_Predictor(human_seg_args)
125
+
126
+ from tqdm import tqdm
127
+ for fname, image in tqdm(dataloader):
128
+ # try:
129
+ ## tensor to numpy image
130
+ fname = fname[0]
131
+ print(f'Processing \'{fname}\'.')
132
+
133
+ image = (image.permute(0, 2, 3, 1) * 255).clamp(0, 255)
134
+ image = image.squeeze(0).numpy() # --> tensor to numpy, (H,W,C)
135
+ # avoid super high res img
136
+ if image.shape[0] >= 2000: # height ### for shein image
137
+ ratio = image.shape[0]/1200 #height
138
+ dim = (int(image.shape[1]/ratio),1200)#(width, height)
139
+ image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
140
+ image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
141
+
142
+ ## create segmentation
143
+ # mybg = cv2.imread('mybg.png')
144
+ comb, segmentation, bg, ori_img = human_seg.run(image,None) #mybg)
145
+ # cv2.imwrite('comb.png',comb) # [0,255]
146
+ # cv2.imwrite('alpha.png',segmentation*255) # segmentation [0,1] --> [0.255]
147
+ # cv2.imwrite('bg.png',bg) #[0,255]
148
+ # cv2.imwrite('ori_img.png',ori_img) # [0,255]
149
+
150
+ masks_np = (segmentation* 255)# .byte().cpu().numpy() #1024,512,1
151
+ mask0_np = masks_np[:,:,0].astype(np.uint8)#[0, :, :]
152
+ contours = cv2.findContours(mask0_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
153
+ cnts = imutils.grab_contours(contours)
154
+ c = max(cnts, key=cv2.contourArea)
155
+ extTop = tuple(c[c[:, :, 1].argmin()][0])
156
+ extBot = tuple(c[c[:, :, 1].argmax()][0])
157
+ extBot = list(extBot)
158
+ extTop = list(extTop)
159
+ pad_range = int((extBot[1]-extTop[1])*0.05)
160
+ if (int(extTop[1])<=5 and int(extTop[1])>0) and (comb.shape[0]>int(extBot[1]) and int(extBot[1])>=comb.shape[0]-5): #seg mask already reaches to the edge
161
+ #pad with pure white, top 100 px, bottom 100 px
162
+ comb= cv2.copyMakeBorder(comb,pad_range+5,pad_range+5,0,0,cv2.BORDER_CONSTANT,value=[255,255,255])
163
+ elif int(extTop[1])<=0 or int(extBot[1])>=comb.shape[0]:
164
+ print('PAD: body out of boundary', fname) # should not happen
165
+ return {}
166
+ else:
167
+ comb = cv2.copyMakeBorder(comb, pad_range+5, pad_range+5, 0, 0, cv2.BORDER_REPLICATE) #105 instead of 100: give some extra space
168
+ extBot[1] = extBot[1] + pad_range+5
169
+ extTop[1] = extTop[1] + pad_range+5
170
+
171
+ extLeft = tuple(c[c[:, :, 0].argmin()][0])
172
+ extRight = tuple(c[c[:, :, 0].argmax()][0])
173
+ extLeft = list(extLeft)
174
+ extRight = list(extRight)
175
+ person_ymin = int(extTop[1])-pad_range # 100
176
+ person_ymax = int(extBot[1])+pad_range # 100 #height
177
+ if person_ymin<0 or person_ymax>comb.shape[0]: # out of range
178
+ return {}
179
+ person_xmin = int(extLeft[0])
180
+ person_xmax = int(extRight[0])
181
+ rect = [person_xmin,person_xmax,person_ymin, person_ymax]
182
+ # recimg = copy.deepcopy(comb)
183
+ # cv2.rectangle(recimg,(person_xmin,person_ymin),(person_xmax,person_ymax),(0,255,0),2)
184
+ # cv2.imwrite(f'{args.output_folder}/middle_result/{fname}_rec.png',recimg)
185
+
186
+ ## detect keypoints
187
+ keypoints, subset = body_estimation(comb)
188
+ # print(keypoints, subset, len(subset))
189
+ if len(subset) != 1 or (len(subset)==1 and subset[0][-1]<15):
190
+ print(f'Processing \'{fname}\'. Please provide an image containing exactly one person; also check the segmentation mask.')
191
+ continue
192
+
193
+ # canvas = copy.deepcopy(comb)
194
+ # canvas = util.draw_bodypose(canvas, keypoints, subset, show_number=True)
195
+ # cv2.imwrite(f'{args.output_folder}/middle_result/{fname}_keypoints.png',canvas)
196
+
197
+ comb = crop_img_with_padding(comb, keypoints, rect)
198
+
199
+
200
+ cv2.imwrite(f'{args.output_folder}/{fname}.png', comb)
201
+ print(f' -- Finished processing \'{fname}\'. --')
202
+ # except:
203
+ # print(f'Processing \'{fname}\'. Not satisfied the alignment strategy.')
204
+
205
+
206
+ if __name__ == '__main__':
207
+ torch.backends.cudnn.benchmark = True
208
+ torch.backends.cudnn.deterministic = False
209
+
210
+ t1 = time.time()
211
+ arg_formatter = argparse.ArgumentDefaultsHelpFormatter
212
+ description = 'StyleGAN-Human data process'
213
+ parser = argparse.ArgumentParser(formatter_class=arg_formatter,
214
+ description=description)
215
+ parser.add_argument('--image-folder', type=str, dest='image_folder')
216
+ parser.add_argument('--output-folder', dest='output_folder', default='results', type=str)
217
+ # parser.add_argument('--cfg', dest='cfg for segmentation', default='PP_HumanSeg/export_model/ppseg_lite_portrait_398x224_with_softmax/deploy.yaml', type=str)
218
+
219
+ print('parsing arguments')
220
+ cmd_args = parser.parse_args()
221
+ run(cmd_args)
222
+
223
+ print('total time elapsed: ', str(time.time() - t1))
stylegan_human/bg_white.py ADDED
@@ -0,0 +1,57 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ import os
4
+ import click
5
+ import cv2
6
+ import numpy as np
7
+
8
+ def bg_white(seg, raw, blur_level=3, gaussian=81):
9
+ seg = cv2.blur(seg, (blur_level, blur_level))
10
+
11
+ empty = np.ones_like(seg)
12
+ seg_bg = (empty - seg) * 255
13
+ seg_bg = cv2.GaussianBlur(seg_bg,(gaussian,gaussian),0)
14
+
15
+ background_mask = cv2.cvtColor(255 - cv2.cvtColor(seg, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
16
+ masked_fg = (raw * (1 / 255)) * (seg * (1 / 255))
17
+ masked_bg = (seg_bg * (1 / 255)) * (background_mask * (1 / 255))
18
+
19
+ frame = np.uint8(cv2.add(masked_bg,masked_fg)*255)
20
+
21
+ return frame
22
+
23
+
24
+ """
25
+ To turn background into white.
26
+
27
+ Examples:
28
+
29
+ \b
30
+ python bg_white.py --raw_img_dir=./SHHQ-1.0/no_segment/ --raw_seg_dir=./SHHQ-1.0/segments/ \\
31
+ --outdir=./SHHQ-1.0/bg_white/
32
+ """
33
+
34
+ @click.command()
35
+ @click.pass_context
36
+ @click.option('--raw_img_dir', default="./SHHQ-1.0/no_segment/", help='folder of raw image', required=True)
37
+ @click.option('--raw_seg_dir', default='./SHHQ-1.0/segments/', help='folder of segmentation masks', required=True)
38
+ @click.option('--outdir', help='Where to save the output images', default= "./SHHQ-1.0/bg_white/" , type=str, required=True, metavar='DIR')
39
+
40
+ def main(
41
+ ctx: click.Context,
42
+ raw_img_dir: str,
43
+ raw_seg_dir: str,
44
+ outdir: str):
45
+ os.makedirs(outdir, exist_ok=True)
46
+ files = os.listdir(raw_img_dir)
47
+ for file in files:
48
+ print(file)
49
+ raw = cv2.imread(os.path.join(raw_img_dir, file))
50
+ seg = cv2.imread(os.path.join(raw_seg_dir, file))
51
+ assert raw is not None
52
+ assert seg is not None
53
+ white_frame = bg_white(seg, raw)
54
+ cv2.imwrite(os.path.join(outdir,file), white_frame)
55
+
56
+ if __name__ == "__main__":
57
+ main()
stylegan_human/dnnlib/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
6
+ # and proprietary rights in and to this software, related documentation
7
+ # and any modifications thereto. Any use, reproduction, disclosure or
8
+ # distribution of this software and related documentation without an express
9
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
10
+
11
+ from .util import EasyDict, make_cache_dir_path
stylegan_human/dnnlib/tflib/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ #
5
+ # This work is made available under the Nvidia Source Code License-NC.
6
+ # To view a copy of this license, visit
7
+ # https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ from . import autosummary
10
+ from . import network
11
+ from . import optimizer
12
+ from . import tfutil
13
+ from . import custom_ops
14
+
15
+ from .tfutil import *
16
+ from .network import Network
17
+
18
+ from .optimizer import Optimizer
19
+
20
+ from .custom_ops import get_plugin
stylegan_human/dnnlib/tflib/autosummary.py ADDED
@@ -0,0 +1,193 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ #
5
+ # This work is made available under the Nvidia Source Code License-NC.
6
+ # To view a copy of this license, visit
7
+ # https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ """Helper for adding automatically tracked values to Tensorboard.
10
+
11
+ Autosummary creates an identity op that internally keeps track of the input
12
+ values and automatically shows up in TensorBoard. The reported value
13
+ represents an average over input components. The average is accumulated
14
+ constantly over time and flushed when save_summaries() is called.
15
+
16
+ Notes:
17
+ - The output tensor must be used as an input for something else in the
18
+ graph. Otherwise, the autosummary op will not get executed, and the average
19
+ value will not get accumulated.
20
+ - It is perfectly fine to include autosummaries with the same name in
21
+ several places throughout the graph, even if they are executed concurrently.
22
+ - It is ok to also pass in a python scalar or numpy array. In this case, it
23
+ is added to the average immediately.
24
+ """
25
+
26
+ from collections import OrderedDict
27
+ import numpy as np
28
+ import tensorflow as tf
29
+ from tensorboard import summary as summary_lib
30
+ from tensorboard.plugins.custom_scalar import layout_pb2
31
+
32
+ from . import tfutil
33
+ from .tfutil import TfExpression
34
+ from .tfutil import TfExpressionEx
35
+
36
+ # Enable "Custom scalars" tab in TensorBoard for advanced formatting.
37
+ # Disabled by default to reduce tfevents file size.
38
+ enable_custom_scalars = False
39
+
40
+ _dtype = tf.float64
41
+ _vars = OrderedDict() # name => [var, ...]
42
+ _immediate = OrderedDict() # name => update_op, update_value
43
+ _finalized = False
44
+ _merge_op = None
45
+
46
+
47
+ def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
48
+ """Internal helper for creating autosummary accumulators."""
49
+ assert not _finalized
50
+ name_id = name.replace("/", "_")
51
+ v = tf.cast(value_expr, _dtype)
52
+
53
+ if v.shape.is_fully_defined():
54
+ size = np.prod(v.shape.as_list())
55
+ size_expr = tf.constant(size, dtype=_dtype)
56
+ else:
57
+ size = None
58
+ size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))
59
+
60
+ if size == 1:
61
+ if v.shape.ndims != 0:
62
+ v = tf.reshape(v, [])
63
+ v = [size_expr, v, tf.square(v)]
64
+ else:
65
+ v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]
66
+ v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))
67
+
68
+ with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None):
69
+ var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)]
70
+ update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
71
+
72
+ if name in _vars:
73
+ _vars[name].append(var)
74
+ else:
75
+ _vars[name] = [var]
76
+ return update_op
77
+
78
+
79
+ def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None, condition: TfExpressionEx = True) -> TfExpressionEx:
80
+ """Create a new autosummary.
81
+
82
+ Args:
83
+ name: Name to use in TensorBoard
84
+ value: TensorFlow expression or python value to track
85
+ passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node.
86
+
87
+ Example use of the passthru mechanism:
88
+
89
+ n = autosummary('l2loss', loss, passthru=n)
90
+
91
+ This is a shorthand for the following code:
92
+
93
+ with tf.control_dependencies([autosummary('l2loss', loss)]):
94
+ n = tf.identity(n)
95
+ """
96
+ tfutil.assert_tf_initialized()
97
+ name_id = name.replace("/", "_")
98
+
99
+ if tfutil.is_tf_expression(value):
100
+ with tf.name_scope("summary_" + name_id), tf.device(value.device):
101
+ condition = tf.convert_to_tensor(condition, name='condition')
102
+ update_op = tf.cond(condition, lambda: tf.group(_create_var(name, value)), tf.no_op)
103
+ with tf.control_dependencies([update_op]):
104
+ return tf.identity(value if passthru is None else passthru)
105
+
106
+ else: # python scalar or numpy array
107
+ assert not tfutil.is_tf_expression(passthru)
108
+ assert not tfutil.is_tf_expression(condition)
109
+ if condition:
110
+ if name not in _immediate:
111
+ with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None):
112
+ update_value = tf.placeholder(_dtype)
113
+ update_op = _create_var(name, update_value)
114
+ _immediate[name] = update_op, update_value
115
+ update_op, update_value = _immediate[name]
116
+ tfutil.run(update_op, {update_value: value})
117
+ return value if passthru is None else passthru
118
+
119
+
120
+ def finalize_autosummaries() -> None:
121
+ """Create the necessary ops to include autosummaries in TensorBoard report.
122
+ Note: This should be done only once per graph.
123
+ """
124
+ global _finalized
125
+ tfutil.assert_tf_initialized()
126
+
127
+ if _finalized:
128
+ return None
129
+
130
+ _finalized = True
131
+ tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])
132
+
133
+ # Create summary ops.
134
+ with tf.device(None), tf.control_dependencies(None):
135
+ for name, vars_list in _vars.items():
136
+ name_id = name.replace("/", "_")
137
+ with tfutil.absolute_name_scope("Autosummary/" + name_id):
138
+ moments = tf.add_n(vars_list)
139
+ moments /= moments[0]
140
+ with tf.control_dependencies([moments]): # read before resetting
141
+ reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
142
+ with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
143
+ mean = moments[1]
144
+ std = tf.sqrt(moments[2] - tf.square(moments[1]))
145
+ tf.summary.scalar(name, mean)
146
+ if enable_custom_scalars:
147
+ tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
148
+ tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)
149
+
150
+ # Setup layout for custom scalars.
151
+ layout = None
152
+ if enable_custom_scalars:
153
+ cat_dict = OrderedDict()
154
+ for series_name in sorted(_vars.keys()):
155
+ p = series_name.split("/")
156
+ cat = p[0] if len(p) >= 2 else ""
157
+ chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
158
+ if cat not in cat_dict:
159
+ cat_dict[cat] = OrderedDict()
160
+ if chart not in cat_dict[cat]:
161
+ cat_dict[cat][chart] = []
162
+ cat_dict[cat][chart].append(series_name)
163
+ categories = []
164
+ for cat_name, chart_dict in cat_dict.items():
165
+ charts = []
166
+ for chart_name, series_names in chart_dict.items():
167
+ series = []
168
+ for series_name in series_names:
169
+ series.append(layout_pb2.MarginChartContent.Series(
170
+ value=series_name,
171
+ lower="xCustomScalars/" + series_name + "/margin_lo",
172
+ upper="xCustomScalars/" + series_name + "/margin_hi"))
173
+ margin = layout_pb2.MarginChartContent(series=series)
174
+ charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
175
+ categories.append(layout_pb2.Category(title=cat_name, chart=charts))
176
+ layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
177
+ return layout
178
+
179
+ def save_summaries(file_writer, global_step=None):
180
+ """Call FileWriter.add_summary() with all summaries in the default graph,
181
+ automatically finalizing and merging them on the first call.
182
+ """
183
+ global _merge_op
184
+ tfutil.assert_tf_initialized()
185
+
186
+ if _merge_op is None:
187
+ layout = finalize_autosummaries()
188
+ if layout is not None:
189
+ file_writer.add_summary(layout)
190
+ with tf.device(None), tf.control_dependencies(None):
191
+ _merge_op = tf.summary.merge_all()
192
+
193
+ file_writer.add_summary(_merge_op.eval(), global_step)
stylegan_human/dnnlib/tflib/custom_ops.py ADDED
@@ -0,0 +1,171 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ #
5
+ # This work is made available under the Nvidia Source Code License-NC.
6
+ # To view a copy of this license, visit
7
+ # https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ """TensorFlow custom ops builder.
10
+ """
11
+
12
+ import os
13
+ import re
14
+ import uuid
15
+ import hashlib
16
+ import tempfile
17
+ import shutil
18
+ import tensorflow as tf
19
+ from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
20
+
21
+ #----------------------------------------------------------------------------
22
+ # Global options.
23
+
24
+ cuda_cache_path = os.path.join(os.path.dirname(__file__), '_cudacache')
25
+ cuda_cache_version_tag = 'v1'
26
+ do_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
27
+ verbose = True # Print status messages to stdout.
28
+
29
+ compiler_bindir_search_path = [
30
+ 'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64',
31
+ 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64',
32
+ 'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin',
33
+ ]
34
+
35
+ #----------------------------------------------------------------------------
36
+ # Internal helper funcs.
37
+
38
+ def _find_compiler_bindir():
39
+ for compiler_path in compiler_bindir_search_path:
40
+ if os.path.isdir(compiler_path):
41
+ return compiler_path
42
+ return None
43
+
44
+ def _get_compute_cap(device):
45
+ caps_str = device.physical_device_desc
46
+ m = re.search('compute capability: (\\d+).(\\d+)', caps_str)
47
+ major = m.group(1)
48
+ minor = m.group(2)
49
+ return (major, minor)
50
+
51
+ def _get_cuda_gpu_arch_string():
52
+ gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU']
53
+ if len(gpus) == 0:
54
+ raise RuntimeError('No GPU devices found')
55
+ (major, minor) = _get_compute_cap(gpus[0])
56
+ return 'sm_%s%s' % (major, minor)
57
+
58
+ def _run_cmd(cmd):
59
+ with os.popen(cmd) as pipe:
60
+ output = pipe.read()
61
+ status = pipe.close()
62
+ if status is not None:
63
+ raise RuntimeError('NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output))
64
+
65
+ def _prepare_nvcc_cli(opts):
66
+ cmd = 'nvcc ' + opts.strip()
67
+ cmd += ' --disable-warnings'
68
+ cmd += ' --include-path "%s"' % tf.sysconfig.get_include()
69
+ cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')
70
+ cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl')
71
+ cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive')
72
+
73
+ compiler_bindir = _find_compiler_bindir()
74
+ if compiler_bindir is None:
75
+ # Require that _find_compiler_bindir succeeds on Windows. Allow
76
+ # nvcc to use whatever is the default on Linux.
77
+ if os.name == 'nt':
78
+ raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
79
+ else:
80
+ cmd += ' --compiler-bindir "%s"' % compiler_bindir
81
+ cmd += ' 2>&1'
82
+ return cmd
83
+
84
+ #----------------------------------------------------------------------------
85
+ # Main entry point.
86
+
87
+ _plugin_cache = dict()
88
+
89
+ def get_plugin(cuda_file):
90
+ cuda_file_base = os.path.basename(cuda_file)
91
+ cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
92
+
93
+ # Already in cache?
94
+ if cuda_file in _plugin_cache:
95
+ return _plugin_cache[cuda_file]
96
+
97
+ # Setup plugin.
98
+ if verbose:
99
+ print('Setting up TensorFlow plugin "%s": ' % cuda_file_base, end='', flush=True)
100
+ try:
101
+ # Hash CUDA source.
102
+ md5 = hashlib.md5()
103
+ with open(cuda_file, 'rb') as f:
104
+ md5.update(f.read())
105
+ md5.update(b'\n')
106
+
107
+ # Hash headers included by the CUDA code by running it through the preprocessor.
108
+ if not do_not_hash_included_headers:
109
+ if verbose:
110
+ print('Preprocessing... ', end='', flush=True)
111
+ with tempfile.TemporaryDirectory() as tmp_dir:
112
+ tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
113
+ _run_cmd(_prepare_nvcc_cli('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
114
+ with open(tmp_file, 'rb') as f:
115
+ bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros
116
+ good_file_str = ('"' + cuda_file_base + '"').encode('utf-8')
117
+ for ln in f:
118
+ if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas
119
+ ln = ln.replace(bad_file_str, good_file_str)
120
+ md5.update(ln)
121
+ md5.update(b'\n')
122
+
123
+ # Select compiler options.
124
+ compile_opts = ''
125
+ if os.name == 'nt':
126
+ compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
127
+ elif os.name == 'posix':
128
+ compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
129
+ compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\''
130
+ else:
131
+ assert False # not Windows or Linux, w00t?
132
+ compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
133
+ compile_opts += ' --use_fast_math'
134
+ nvcc_cmd = _prepare_nvcc_cli(compile_opts)
135
+
136
+ # Hash build configuration.
137
+ md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
138
+ md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
139
+ md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n')
140
+
141
+ # Compile if not already compiled.
142
+ bin_file_ext = '.dll' if os.name == 'nt' else '.so'
143
+ bin_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)
144
+ if not os.path.isfile(bin_file):
145
+ if verbose:
146
+ print('Compiling... ', end='', flush=True)
147
+ with tempfile.TemporaryDirectory() as tmp_dir:
148
+ tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
149
+ _run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))
150
+ os.makedirs(cuda_cache_path, exist_ok=True)
151
+ intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
152
+ shutil.copyfile(tmp_file, intermediate_file)
153
+ os.rename(intermediate_file, bin_file) # atomic
154
+
155
+ # Load.
156
+ if verbose:
157
+ print('Loading... ', end='', flush=True)
158
+ plugin = tf.load_op_library(bin_file)
159
+
160
+ # Add to cache.
161
+ _plugin_cache[cuda_file] = plugin
162
+ if verbose:
163
+ print('Done.', flush=True)
164
+ return plugin
165
+
166
+ except:
167
+ if verbose:
168
+ print('Failed!', flush=True)
169
+ raise
170
+
171
+ #----------------------------------------------------------------------------
stylegan_human/dnnlib/tflib/network.py ADDED
@@ -0,0 +1,592 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ #
5
+ # This work is made available under the Nvidia Source Code License-NC.
6
+ # To view a copy of this license, visit
7
+ # https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ """Helper for managing networks."""
10
+
11
+ import types
12
+ import inspect
13
+ import re
14
+ import uuid
15
+ import sys
16
+ import numpy as np
17
+ import tensorflow as tf
18
+
19
+ from collections import OrderedDict
20
+ from typing import Any, List, Tuple, Union
21
+
22
+ from . import tfutil
23
+ from .. import util
24
+
25
+ from .tfutil import TfExpression, TfExpressionEx
26
+
27
+ _import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
28
+ _import_module_src = dict() # Source code for temporary modules created during pickle import.
29
+
30
+
31
+ def import_handler(handler_func):
32
+ """Function decorator for declaring custom import handlers."""
33
+ _import_handlers.append(handler_func)
34
+ return handler_func
35
+
36
+
37
+ class Network:
38
+ """Generic network abstraction.
39
+
40
+ Acts as a convenience wrapper for a parameterized network construction
41
+ function, providing several utility methods and convenient access to
42
+ the inputs/outputs/weights.
43
+
44
+ Network objects can be safely pickled and unpickled for long-term
45
+ archival purposes. The pickling works reliably as long as the underlying
46
+ network construction function is defined in a standalone Python module
47
+ that has no side effects or application-specific imports.
48
+
49
+ Args:
50
+ name: Network name. Used to select TensorFlow name and variable scopes.
51
+ func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
52
+ static_kwargs: Keyword arguments to be passed in to the network construction function.
53
+
54
+ Attributes:
55
+ name: User-specified name, defaults to build func name if None.
56
+ scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.
57
+ static_kwargs: Arguments passed to the user-supplied build func.
58
+ components: Container for sub-networks. Passed to the build func, and retained between calls.
59
+ num_inputs: Number of input tensors.
60
+ num_outputs: Number of output tensors.
61
+ input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
62
+ output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
63
+ input_shape: Short-hand for input_shapes[0].
64
+ output_shape: Short-hand for output_shapes[0].
65
+ input_templates: Input placeholders in the template graph.
66
+ output_templates: Output tensors in the template graph.
67
+ input_names: Name string for each input.
68
+ output_names: Name string for each output.
69
+ own_vars: Variables defined by this network (local_name => var), excluding sub-networks.
70
+ vars: All variables (local_name => var).
71
+ trainables: All trainable variables (local_name => var).
72
+ var_global_to_local: Mapping from variable global names to local names.
73
+ """
74
+
75
+ def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
76
+ tfutil.assert_tf_initialized()
77
+ assert isinstance(name, str) or name is None
78
+ assert func_name is not None
79
+ assert isinstance(func_name, str) or util.is_top_level_function(func_name)
80
+ assert util.is_pickleable(static_kwargs)
81
+
82
+ self._init_fields()
83
+ self.name = name
84
+ self.static_kwargs = util.EasyDict(static_kwargs)
85
+
86
+ # Locate the user-specified network build function.
87
+ if util.is_top_level_function(func_name):
88
+ func_name = util.get_top_level_function_name(func_name)
89
+ module, self._build_func_name = util.get_module_from_obj_name(func_name)
90
+ self._build_func = util.get_obj_from_module(module, self._build_func_name)
91
+ assert callable(self._build_func)
92
+
93
+ # Dig up source code for the module containing the build function.
94
+ self._build_module_src = _import_module_src.get(module, None)
95
+ if self._build_module_src is None:
96
+ self._build_module_src = inspect.getsource(module)
97
+
98
+ # Init TensorFlow graph.
99
+ self._init_graph()
100
+ self.reset_own_vars()
101
+
102
+ def _init_fields(self) -> None:
103
+ self.name = None
104
+ self.scope = None
105
+ self.static_kwargs = util.EasyDict()
106
+ self.components = util.EasyDict()
107
+ self.num_inputs = 0
108
+ self.num_outputs = 0
109
+ self.input_shapes = [[]]
110
+ self.output_shapes = [[]]
111
+ self.input_shape = []
112
+ self.output_shape = []
113
+ self.input_templates = []
114
+ self.output_templates = []
115
+ self.input_names = []
116
+ self.output_names = []
117
+ self.own_vars = OrderedDict()
118
+ self.vars = OrderedDict()
119
+ self.trainables = OrderedDict()
120
+ self.var_global_to_local = OrderedDict()
121
+
122
+ self._build_func = None # User-supplied build function that constructs the network.
123
+ self._build_func_name = None # Name of the build function.
124
+ self._build_module_src = None # Full source code of the module containing the build function.
125
+ self._run_cache = dict() # Cached graph data for Network.run().
126
+
127
+ def _init_graph(self) -> None:
128
+ # Collect inputs.
129
+ self.input_names = []
130
+
131
+ for param in inspect.signature(self._build_func).parameters.values():
132
+ if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
133
+ self.input_names.append(param.name)
134
+
135
+ self.num_inputs = len(self.input_names)
136
+ assert self.num_inputs >= 1
137
+
138
+ # Choose name and scope.
139
+ if self.name is None:
140
+ self.name = self._build_func_name
141
+ assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
142
+ with tf.name_scope(None):
143
+ self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True)
144
+
145
+ # Finalize build func kwargs.
146
+ build_kwargs = dict(self.static_kwargs)
147
+ build_kwargs["is_template_graph"] = True
148
+ build_kwargs["components"] = self.components
149
+
150
+ # Build template graph.
151
+ with tfutil.absolute_variable_scope(self.scope, reuse=False), tfutil.absolute_name_scope(self.scope): # ignore surrounding scopes
152
+ assert tf.get_variable_scope().name == self.scope
153
+ assert tf.get_default_graph().get_name_scope() == self.scope
154
+ with tf.control_dependencies(None): # ignore surrounding control dependencies
155
+ self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
156
+ out_expr = self._build_func(*self.input_templates, **build_kwargs)
157
+
158
+ # Collect outputs.
159
+ assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
160
+ self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
161
+ self.num_outputs = len(self.output_templates)
162
+ assert self.num_outputs >= 1
163
+ assert all(tfutil.is_tf_expression(t) for t in self.output_templates)
164
+
165
+ # Perform sanity checks.
166
+ if any(t.shape.ndims is None for t in self.input_templates):
167
+ raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
168
+ if any(t.shape.ndims is None for t in self.output_templates):
169
+ raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
170
+ if any(not isinstance(comp, Network) for comp in self.components.values()):
171
+ raise ValueError("Components of a Network must be Networks themselves.")
172
+ if len(self.components) != len(set(comp.name for comp in self.components.values())):
173
+ raise ValueError("Components of a Network must have unique names.")
174
+
175
+ # List inputs and outputs.
176
+ self.input_shapes = [t.shape.as_list() for t in self.input_templates]
177
+ self.output_shapes = [t.shape.as_list() for t in self.output_templates]
178
+ self.input_shape = self.input_shapes[0]
179
+ self.output_shape = self.output_shapes[0]
180
+ self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
181
+
182
+ # List variables.
183
+ self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
184
+ self.vars = OrderedDict(self.own_vars)
185
+ self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items())
186
+ self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
187
+ self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
188
+
189
+ def reset_own_vars(self) -> None:
190
+ """Re-initialize all variables of this network, excluding sub-networks."""
191
+ tfutil.run([var.initializer for var in self.own_vars.values()])
192
+
193
+ def reset_vars(self) -> None:
194
+ """Re-initialize all variables of this network, including sub-networks."""
195
+ tfutil.run([var.initializer for var in self.vars.values()])
196
+
197
+ def reset_trainables(self) -> None:
198
+ """Re-initialize all trainable variables of this network, including sub-networks."""
199
+ tfutil.run([var.initializer for var in self.trainables.values()])
200
+
201
+ def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
202
+ """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s)."""
203
+ assert len(in_expr) == self.num_inputs
204
+ assert not all(expr is None for expr in in_expr)
205
+
206
+ # Finalize build func kwargs.
207
+ build_kwargs = dict(self.static_kwargs)
208
+ build_kwargs.update(dynamic_kwargs)
209
+ build_kwargs["is_template_graph"] = False
210
+ build_kwargs["components"] = self.components
211
+
212
+ # Build TensorFlow graph to evaluate the network.
213
+ with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
214
+ assert tf.get_variable_scope().name == self.scope
215
+ valid_inputs = [expr for expr in in_expr if expr is not None]
216
+ final_inputs = []
217
+ for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
218
+ if expr is not None:
219
+ expr = tf.identity(expr, name=name)
220
+ else:
221
+ expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
222
+ final_inputs.append(expr)
223
+ out_expr = self._build_func(*final_inputs, **build_kwargs)
224
+
225
+ # Propagate input shapes back to the user-specified expressions.
226
+ for expr, final in zip(in_expr, final_inputs):
227
+ if isinstance(expr, tf.Tensor):
228
+ expr.set_shape(final.shape)
229
+
230
+ # Express outputs in the desired format.
231
+ assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
232
+ if return_as_list:
233
+ out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
234
+ return out_expr
235
+
236
+ def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
237
+ """Get the local name of a given variable, without any surrounding name scopes."""
238
+ assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
239
+ global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
240
+ return self.var_global_to_local[global_name]
241
+
242
+ def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
243
+ """Find variable by local or global name."""
244
+ assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
245
+ return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
246
+
247
+ def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
248
+ """Get the value of a given variable as NumPy array.
249
+ Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
250
+ return self.find_var(var_or_local_name).eval()
251
+
252
+ def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
253
+ """Set the value of a given variable based on the given NumPy array.
254
+ Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
255
+ tfutil.set_vars({self.find_var(var_or_local_name): new_value})
256
+
257
+ def __getstate__(self) -> dict:
258
+ """Pickle export."""
259
+ state = dict()
260
+ state["version"] = 4
261
+ state["name"] = self.name
262
+ state["static_kwargs"] = dict(self.static_kwargs)
263
+ state["components"] = dict(self.components)
264
+ state["build_module_src"] = self._build_module_src
265
+ state["build_func_name"] = self._build_func_name
266
+ state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values()))))
267
+ return state
268
+
269
+ def __setstate__(self, state: dict) -> None:
270
+ """Pickle import."""
271
+ # pylint: disable=attribute-defined-outside-init
272
+ tfutil.assert_tf_initialized()
273
+ self._init_fields()
274
+
275
+ # Execute custom import handlers.
276
+ for handler in _import_handlers:
277
+ state = handler(state)
278
+
279
+ # Set basic fields.
280
+ assert state["version"] in [2, 3, 4]
281
+ self.name = state["name"]
282
+ self.static_kwargs = util.EasyDict(state["static_kwargs"])
283
+ self.components = util.EasyDict(state.get("components", {}))
284
+ self._build_module_src = state["build_module_src"]
285
+ self._build_func_name = state["build_func_name"]
286
+
287
+ # Create temporary module from the imported source code.
288
+ module_name = "_tflib_network_import_" + uuid.uuid4().hex
289
+ module = types.ModuleType(module_name)
290
+ sys.modules[module_name] = module
291
+ _import_module_src[module] = self._build_module_src
292
+ exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used
293
+
294
+ # Locate network build function in the temporary module.
295
+ self._build_func = util.get_obj_from_module(module, self._build_func_name)
296
+ assert callable(self._build_func)
297
+
298
+ # Init TensorFlow graph.
299
+ self._init_graph()
300
+ self.reset_own_vars()
301
+ tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]})
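`__getstate__` and `__setstate__` together give the round trip promised in the class docstring: the pickle carries the build module's full source, the static kwargs, and the variable values, and import re-executes that source in a throwaway module. A sketch, where `my_module.build_net` and its `resolution` kwarg are invented:

```python
import pickle

# Assumes TensorFlow has already been initialized (e.g. via tflib.init_tf()).
net = Network(name="G", func_name="my_module.build_net", resolution=256)
data = pickle.dumps(net)   # embeds module source + static kwargs + variable values
net2 = pickle.loads(data)  # re-executes the source in a temp module, restores variables
```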
302
+
303
+ def clone(self, name: str = None, **new_static_kwargs) -> "Network":
304
+ """Create a clone of this network with its own copy of the variables."""
305
+ # pylint: disable=protected-access
306
+ net = object.__new__(Network)
307
+ net._init_fields()
308
+ net.name = name if name is not None else self.name
309
+ net.static_kwargs = util.EasyDict(self.static_kwargs)
310
+ net.static_kwargs.update(new_static_kwargs)
311
+ net._build_module_src = self._build_module_src
312
+ net._build_func_name = self._build_func_name
313
+ net._build_func = self._build_func
314
+ net._init_graph()
315
+ net.copy_vars_from(self)
316
+ return net
317
+
318
+ def copy_own_vars_from(self, src_net: "Network") -> None:
319
+ """Copy the values of all variables from the given network, excluding sub-networks."""
320
+ names = [name for name in self.own_vars.keys() if name in src_net.own_vars]
321
+ tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
322
+
323
+ def copy_vars_from(self, src_net: "Network") -> None:
324
+ """Copy the values of all variables from the given network, including sub-networks."""
325
+ names = [name for name in self.vars.keys() if name in src_net.vars]
326
+ tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
327
+
328
+ def copy_trainables_from(self, src_net: "Network") -> None:
329
+ """Copy the values of all trainable variables from the given network, including sub-networks."""
330
+ names = [name for name in self.trainables.keys() if name in src_net.trainables]
331
+ tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
332
+
333
+ def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
334
+ """Create new network with the given parameters, and copy all variables from this network."""
335
+ if new_name is None:
336
+ new_name = self.name
337
+ static_kwargs = dict(self.static_kwargs)
338
+ static_kwargs.update(new_static_kwargs)
339
+ net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
340
+ net.copy_vars_from(self)
341
+ return net
342
+
343
+ def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
344
+ """Construct a TensorFlow op that updates the variables of this network
345
+ to be slightly closer to those of the given network."""
346
+ with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
347
+ ops = []
348
+ for name, var in self.vars.items():
349
+ if name in src_net.vars:
350
+ cur_beta = beta if name in self.trainables else beta_nontrainable
351
+ new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
352
+ ops.append(var.assign(new_value))
353
+ return tf.group(*ops)
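`tfutil.lerp(a, b, t)` presumably computes `a + (b - a) * t`, so the assigned value reduces to the usual exponential moving average, `beta * var + (1 - beta) * src`. A NumPy check of that identity:

```python
import numpy as np

def ema_step(w_avg, w_src, beta=0.99):
    # lerp(src, var, beta) = src + (var - src) * beta = beta * var + (1 - beta) * src
    return beta * w_avg + (1.0 - beta) * w_src

assert np.allclose(ema_step(np.ones(4), np.zeros(4)), 0.99)
```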
354
+
355
+ def run(self,
356
+ *in_arrays: Tuple[Union[np.ndarray, None], ...],
357
+ input_transform: dict = None,
358
+ output_transform: dict = None,
359
+ return_as_list: bool = False,
360
+ print_progress: bool = False,
361
+ minibatch_size: int = None,
362
+ num_gpus: int = 1,
363
+ assume_frozen: bool = False,
364
+ **dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
365
+ """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
366
+
367
+ Args:
368
+ input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
369
+ The dict must contain a 'func' field that points to a top-level function. The function is called with the input
370
+ TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
371
+ output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
372
+ The dict must contain a 'func' field that points to a top-level function. The function is called with the output
373
+ TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
374
+ return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
375
+ print_progress: Print progress to the console? Useful for very large input arrays.
376
+ minibatch_size: Maximum minibatch size to use, None = disable batching.
377
+ num_gpus: Number of GPUs to use.
378
+ assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
379
+ dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
380
+ """
381
+ assert len(in_arrays) == self.num_inputs
382
+ assert not all(arr is None for arr in in_arrays)
383
+ assert input_transform is None or util.is_top_level_function(input_transform["func"])
384
+ assert output_transform is None or util.is_top_level_function(output_transform["func"])
385
+ output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
386
+ num_items = in_arrays[0].shape[0]
387
+ if minibatch_size is None:
388
+ minibatch_size = num_items
389
+
390
+ # Construct unique hash key from all arguments that affect the TensorFlow graph.
391
+ key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
392
+ def unwind_key(obj):
393
+ if isinstance(obj, dict):
394
+ return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
395
+ if callable(obj):
396
+ return util.get_top_level_function_name(obj)
397
+ return obj
398
+ key = repr(unwind_key(key))
399
+
400
+ # Build graph.
401
+ if key not in self._run_cache:
402
+ with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
403
+ with tf.device("/cpu:0"):
404
+ in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
405
+ in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
406
+
407
+ out_split = []
408
+ for gpu in range(num_gpus):
409
+ with tf.device("/gpu:%d" % gpu):
410
+ net_gpu = self.clone() if assume_frozen else self
411
+ in_gpu = in_split[gpu]
412
+
413
+ if input_transform is not None:
414
+ in_kwargs = dict(input_transform)
415
+ in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
416
+ in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
417
+
418
+ assert len(in_gpu) == self.num_inputs
419
+ out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
420
+
421
+ if output_transform is not None:
422
+ out_kwargs = dict(output_transform)
423
+ out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
424
+ out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
425
+
426
+ assert len(out_gpu) == self.num_outputs
427
+ out_split.append(out_gpu)
428
+
429
+ with tf.device("/cpu:0"):
430
+ out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
431
+ self._run_cache[key] = in_expr, out_expr
432
+
433
+ # Run minibatches.
434
+ in_expr, out_expr = self._run_cache[key]
435
+ out_arrays = [np.empty([num_items] + expr.shape.as_list()[1:], expr.dtype.name) for expr in out_expr]
436
+
437
+ for mb_begin in range(0, num_items, minibatch_size):
438
+ if print_progress:
439
+ print("\r%d / %d" % (mb_begin, num_items), end="")
440
+
441
+ mb_end = min(mb_begin + minibatch_size, num_items)
442
+ mb_num = mb_end - mb_begin
443
+ mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
444
+ mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
445
+
446
+ for dst, src in zip(out_arrays, mb_out):
447
+ dst[mb_begin: mb_end] = src
448
+
449
+ # Done.
450
+ if print_progress:
451
+ print("\r%d / %d" % (num_items, num_items))
452
+
453
+ if not return_as_list:
454
+ out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
455
+ return out_arrays
456
+
457
+ def list_ops(self) -> List[TfExpression]:
+ """List all TensorFlow ops in this network's scope, excluding internal scopes whose names start with an underscore."""
458
+ include_prefix = self.scope + "/"
459
+ exclude_prefix = include_prefix + "_"
460
+ ops = tf.get_default_graph().get_operations()
461
+ ops = [op for op in ops if op.name.startswith(include_prefix)]
462
+ ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
463
+ return ops
464
+
465
+ def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
466
+ """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
467
+ individual layers of the network. Mainly intended to be used for reporting."""
468
+ layers = []
469
+
470
+ def recurse(scope, parent_ops, parent_vars, level):
471
+ # Ignore specific patterns.
472
+ if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
473
+ return
474
+
475
+ # Filter ops and vars by scope.
476
+ global_prefix = scope + "/"
477
+ local_prefix = global_prefix[len(self.scope) + 1:]
478
+ cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
479
+ cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
480
+ if not cur_ops and not cur_vars:
481
+ return
482
+
483
+ # Filter out all ops related to variables.
484
+ for var in [op for op in cur_ops if op.type.startswith("Variable")]:
485
+ var_prefix = var.name + "/"
486
+ cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
487
+
488
+ # Scope does not contain ops as immediate children => recurse deeper.
489
+ contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type not in ["Identity", "Cast", "Transpose"] for op in cur_ops)
490
+ if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1:
491
+ visited = set()
492
+ for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
493
+ token = rel_name.split("/")[0]
494
+ if token not in visited:
495
+ recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
496
+ visited.add(token)
497
+ return
498
+
499
+ # Report layer.
500
+ layer_name = scope[len(self.scope) + 1:]
501
+ layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
502
+ layer_trainables = [var for _name, var in cur_vars if var.trainable]
503
+ layers.append((layer_name, layer_output, layer_trainables))
504
+
505
+ recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
506
+ return layers
507
+
508
+ def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
509
+ """Print a summary table of the network structure."""
510
+ rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
511
+ rows += [["---"] * 4]
512
+ total_params = 0
513
+
514
+ for layer_name, layer_output, layer_trainables in self.list_layers():
515
+ num_params = sum(int(np.prod(var.shape.as_list())) for var in layer_trainables)
516
+ weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
517
+ weights.sort(key=lambda x: len(x.name))
518
+ if len(weights) == 0 and len(layer_trainables) == 1:
519
+ weights = layer_trainables
520
+ total_params += num_params
521
+
522
+ if not hide_layers_with_no_params or num_params != 0:
523
+ num_params_str = str(num_params) if num_params > 0 else "-"
524
+ output_shape_str = str(layer_output.shape)
525
+ weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
526
+ rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
527
+
528
+ rows += [["---"] * 4]
529
+ rows += [["Total", str(total_params), "", ""]]
530
+
531
+ widths = [max(len(cell) for cell in column) for column in zip(*rows)]
532
+ print()
533
+ for row in rows:
534
+ print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
535
+ print()
536
+
537
+ def setup_weight_histograms(self, title: str = None) -> None:
538
+ """Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
539
+ if title is None:
540
+ title = self.name
541
+
542
+ with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
543
+ for local_name, var in self.trainables.items():
544
+ if "/" in local_name:
545
+ p = local_name.split("/")
546
+ name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
547
+ else:
548
+ name = title + "_toplevel/" + local_name
549
+
550
+ tf.summary.histogram(name, var)
551
+
552
+ #----------------------------------------------------------------------------
553
+ # Backwards-compatible emulation of legacy output transformation in Network.run().
554
+
555
+ _print_legacy_warning = True
556
+
557
+ def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
558
+ global _print_legacy_warning
559
+ legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
560
+ if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
561
+ return output_transform, dynamic_kwargs
562
+
563
+ if _print_legacy_warning:
564
+ _print_legacy_warning = False
565
+ print()
566
+ print("WARNING: Old-style output transformations in Network.run() are deprecated.")
567
+ print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
568
+ print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
569
+ print()
570
+ assert output_transform is None
571
+
572
+ new_kwargs = dict(dynamic_kwargs)
573
+ new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
574
+ new_transform["func"] = _legacy_output_transform_func
575
+ return new_transform, new_kwargs
576
+
577
+ def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
578
+ if out_mul != 1.0:
579
+ expr = [x * out_mul for x in expr]
580
+
581
+ if out_add != 0.0:
582
+ expr = [x + out_add for x in expr]
583
+
584
+ if out_shrink > 1:
585
+ ksize = [1, 1, out_shrink, out_shrink]
586
+ expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
587
+
588
+ if out_dtype is not None:
589
+ if tf.as_dtype(out_dtype).is_integer:
590
+ expr = [tf.round(x) for x in expr]
591
+ expr = [tf.saturate_cast(x, out_dtype) for x in expr]
592
+ return expr
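Putting `run()` and the legacy shim together: the new-style call that replaces the deprecated kwargs looks like this. A sketch only; `Gs` (a generator `Network`) and the latent shape are assumptions, while `tflib.convert_images_to_uint8` is the function named in the warning above:

```python
import numpy as np

latents = np.random.randn(8, 512).astype(np.float32)  # assumed input shape
# Deprecated: Gs.run(latents, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)
images = Gs.run(latents,
                output_transform=dict(func=tflib.convert_images_to_uint8),
                minibatch_size=4)  # evaluated in batches of 4, concatenated to [8, ...]
```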
stylegan_human/dnnlib/tflib/ops/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ #
5
+ # This work is made available under the Nvidia Source Code License-NC.
6
+ # To view a copy of this license, visit
7
+ # https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ # empty
stylegan_human/dnnlib/tflib/ops/fused_bias_act.cu ADDED
@@ -0,0 +1,190 @@
1
+ // Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ // Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ //
5
+ // This work is made available under the Nvidia Source Code License-NC.
6
+ // To view a copy of this license, visit
7
+ // https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ #define EIGEN_USE_GPU
10
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
11
+ #include "tensorflow/core/framework/op.h"
12
+ #include "tensorflow/core/framework/op_kernel.h"
13
+ #include "tensorflow/core/framework/shape_inference.h"
14
+ #include <stdio.h>
15
+
16
+ using namespace tensorflow;
17
+ using namespace tensorflow::shape_inference;
18
+
19
+ #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal(cudaGetErrorName(err))); } while (false)
20
+
21
+ //------------------------------------------------------------------------
22
+ // CUDA kernel.
23
+
24
+ template <class T>
25
+ struct FusedBiasActKernelParams
26
+ {
27
+ const T* x; // [sizeX]
28
+ const T* b; // [sizeB] or NULL
29
+ const T* ref; // [sizeX] or NULL
30
+ T* y; // [sizeX]
31
+
32
+ int grad;
33
+ int axis;
34
+ int act;
35
+ float alpha;
36
+ float gain;
37
+
38
+ int sizeX;
39
+ int sizeB;
40
+ int stepB;
41
+ int loopX;
42
+ };
43
+
44
+ template <class T>
45
+ static __global__ void FusedBiasActKernel(const FusedBiasActKernelParams<T> p)
46
+ {
47
+ const float expRange = 80.0f;
48
+ const float halfExpRange = 40.0f;
49
+ const float seluScale = 1.0507009873554804934193349852946f;
50
+ const float seluAlpha = 1.6732632423543772848170429916717f;
51
+
52
+ // Loop over elements.
53
+ int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
54
+ for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
55
+ {
56
+ // Load and apply bias.
57
+ float x = (float)p.x[xi];
58
+ if (p.b)
59
+ x += (float)p.b[(xi / p.stepB) % p.sizeB];
60
+ float ref = (p.ref) ? (float)p.ref[xi] : 0.0f;
61
+ if (p.gain != 0.0f & p.act != 9)
62
+ ref /= p.gain;
63
+
64
+ // Evaluate activation func.
65
+ float y;
66
+ switch (p.act * 10 + p.grad)
67
+ {
68
+ // linear
69
+ default:
70
+ case 10: y = x; break;
71
+ case 11: y = x; break;
72
+ case 12: y = 0.0f; break;
73
+
74
+ // relu
75
+ case 20: y = (x > 0.0f) ? x : 0.0f; break;
76
+ case 21: y = (ref > 0.0f) ? x : 0.0f; break;
77
+ case 22: y = 0.0f; break;
78
+
79
+ // lrelu
80
+ case 30: y = (x > 0.0f) ? x : x * p.alpha; break;
81
+ case 31: y = (ref > 0.0f) ? x : x * p.alpha; break;
82
+ case 32: y = 0.0f; break;
83
+
84
+ // tanh
85
+ case 40: { float c = expf(x); float d = 1.0f / c; y = (x < -expRange) ? -1.0f : (x > expRange) ? 1.0f : (c - d) / (c + d); } break;
86
+ case 41: y = x * (1.0f - ref * ref); break;
87
+ case 42: y = x * (1.0f - ref * ref) * (-2.0f * ref); break;
88
+
89
+ // sigmoid
90
+ case 50: y = (x < -expRange) ? 0.0f : 1.0f / (expf(-x) + 1.0f); break;
91
+ case 51: y = x * ref * (1.0f - ref); break;
92
+ case 52: y = x * ref * (1.0f - ref) * (1.0f - 2.0f * ref); break;
93
+
94
+ // elu
95
+ case 60: y = (x >= 0.0f) ? x : expf(x) - 1.0f; break;
96
+ case 61: y = (ref >= 0.0f) ? x : x * (ref + 1.0f); break;
97
+ case 62: y = (ref >= 0.0f) ? 0.0f : x * (ref + 1.0f); break;
98
+
99
+ // selu
100
+ case 70: y = (x >= 0.0f) ? seluScale * x : (seluScale * seluAlpha) * (expf(x) - 1.0f); break;
101
+ case 71: y = (ref >= 0.0f) ? x * seluScale : x * (ref + seluScale * seluAlpha); break;
102
+ case 72: y = (ref >= 0.0f) ? 0.0f : x * (ref + seluScale * seluAlpha); break;
103
+
104
+ // softplus
105
+ case 80: y = (x > expRange) ? x : logf(expf(x) + 1.0f); break;
106
+ case 81: y = x * (1.0f - expf(-ref)); break;
107
+ case 82: { float c = expf(-ref); y = x * c * (1.0f - c); } break;
108
+
109
+ // swish
110
+ case 90: y = (x < -expRange) ? 0.0f : x / (expf(-x) + 1.0f); break;
111
+ case 91: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? x : x * c * (ref + d) / (d * d); } break;
112
+ case 92: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? 0.0f : x * c * (ref * (2.0f - d) + 2.0f * d) / (d * d * d); } break;
113
+ }
114
+
115
+ // Apply gain and store.
116
+ p.y[xi] = (T)(y * p.gain);
117
+ }
118
+ }
119
+
120
+ //------------------------------------------------------------------------
121
+ // TensorFlow op.
122
+
123
+ template <class T>
124
+ struct FusedBiasActOp : public OpKernel
125
+ {
126
+ FusedBiasActKernelParams<T> m_attribs;
127
+
128
+ FusedBiasActOp(OpKernelConstruction* ctx) : OpKernel(ctx)
129
+ {
130
+ memset(&m_attribs, 0, sizeof(m_attribs));
131
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("grad", &m_attribs.grad));
132
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &m_attribs.axis));
133
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("act", &m_attribs.act));
134
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("alpha", &m_attribs.alpha));
135
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("gain", &m_attribs.gain));
136
+ OP_REQUIRES(ctx, m_attribs.grad >= 0, errors::InvalidArgument("grad must be non-negative"));
137
+ OP_REQUIRES(ctx, m_attribs.axis >= 0, errors::InvalidArgument("axis must be non-negative"));
138
+ OP_REQUIRES(ctx, m_attribs.act >= 0, errors::InvalidArgument("act must be non-negative"));
139
+ }
140
+
141
+ void Compute(OpKernelContext* ctx)
142
+ {
143
+ FusedBiasActKernelParams<T> p = m_attribs;
144
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
145
+
146
+ const Tensor& x = ctx->input(0); // [...]
147
+ const Tensor& b = ctx->input(1); // [sizeB] or [0]
148
+ const Tensor& ref = ctx->input(2); // x.shape or [0]
149
+ p.x = x.flat<T>().data();
150
+ p.b = (b.NumElements()) ? b.flat<T>().data() : NULL;
151
+ p.ref = (ref.NumElements()) ? ref.flat<T>().data() : NULL;
152
+ OP_REQUIRES(ctx, b.NumElements() == 0 || m_attribs.axis < x.dims(), errors::InvalidArgument("axis out of bounds"));
153
+ OP_REQUIRES(ctx, b.dims() == 1, errors::InvalidArgument("b must have rank 1"));
154
+ OP_REQUIRES(ctx, b.NumElements() == 0 || b.NumElements() == x.dim_size(m_attribs.axis), errors::InvalidArgument("b has wrong number of elements"));
155
+ OP_REQUIRES(ctx, ref.NumElements() == ((p.grad == 0) ? 0 : x.NumElements()), errors::InvalidArgument("ref has wrong number of elements"));
156
+ OP_REQUIRES(ctx, x.NumElements() <= kint32max, errors::InvalidArgument("x is too large"));
157
+
158
+ p.sizeX = (int)x.NumElements();
159
+ p.sizeB = (int)b.NumElements();
160
+ p.stepB = 1;
161
+ for (int i = m_attribs.axis + 1; i < x.dims(); i++)
162
+ p.stepB *= (int)x.dim_size(i);
163
+
164
+ Tensor* y = NULL; // x.shape
165
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, x.shape(), &y));
166
+ p.y = y->flat<T>().data();
167
+
168
+ p.loopX = 4;
169
+ int blockSize = 4 * 32;
170
+ int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
171
+ void* args[] = {&p};
172
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)FusedBiasActKernel<T>, gridSize, blockSize, args, 0, stream));
173
+ }
174
+ };
175
+
176
+ REGISTER_OP("FusedBiasAct")
177
+ .Input ("x: T")
178
+ .Input ("b: T")
179
+ .Input ("ref: T")
180
+ .Output ("y: T")
181
+ .Attr ("T: {float, half}")
182
+ .Attr ("grad: int = 0")
183
+ .Attr ("axis: int = 1")
184
+ .Attr ("act: int = 0")
185
+ .Attr ("alpha: float = 0.0")
186
+ .Attr ("gain: float = 1.0");
187
+ REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<float>("T"), FusedBiasActOp<float>);
188
+ REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<Eigen::half>("T"), FusedBiasActOp<Eigen::half>);
189
+
190
+ //------------------------------------------------------------------------
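`FusedBiasActKernel` dispatches on `act * 10 + grad`, so each activation owns three consecutive cases: forward pass (`grad=0`), first gradient (`grad=1`), and second gradient (`grad=2`). For lrelu (`act=3`) these are cases 30/31/32; a Python restatement of their semantics, for reference only:

```python
def lrelu_case(x, ref, alpha, grad):
    # Mirrors cases 30/31/32 of `switch (p.act * 10 + p.grad)` above.
    if grad == 0:               # case 30: forward pass
        return x if x > 0.0 else x * alpha
    if grad == 1:               # case 31: gradient, gated by the saved reference value
        return x if ref > 0.0 else x * alpha
    return 0.0                  # case 32: second gradient of a piecewise-linear function
```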
stylegan_human/dnnlib/tflib/ops/fused_bias_act.py ADDED
@@ -0,0 +1,198 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ #
5
+ # This work is made available under the Nvidia Source Code License-NC.
6
+ # To view a copy of this license, visit
7
+ # https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ """Custom TensorFlow ops for efficient bias and activation."""
10
+
11
+ import os
12
+ import numpy as np
13
+ import tensorflow as tf
14
+ from .. import custom_ops
15
+ from ...util import EasyDict
16
+
17
+ def _get_plugin():
18
+ return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')
19
+
20
+ #----------------------------------------------------------------------------
21
+
22
+ activation_funcs = {
23
+ 'linear': EasyDict(func=lambda x, **_: x, def_alpha=None, def_gain=1.0, cuda_idx=1, ref='y', zero_2nd_grad=True),
24
+ 'relu': EasyDict(func=lambda x, **_: tf.nn.relu(x), def_alpha=None, def_gain=np.sqrt(2), cuda_idx=2, ref='y', zero_2nd_grad=True),
25
+ 'lrelu': EasyDict(func=lambda x, alpha, **_: tf.nn.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', zero_2nd_grad=True),
26
+ 'tanh': EasyDict(func=lambda x, **_: tf.nn.tanh(x), def_alpha=None, def_gain=1.0, cuda_idx=4, ref='y', zero_2nd_grad=False),
27
+ 'sigmoid': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x), def_alpha=None, def_gain=1.0, cuda_idx=5, ref='y', zero_2nd_grad=False),
28
+ 'elu': EasyDict(func=lambda x, **_: tf.nn.elu(x), def_alpha=None, def_gain=1.0, cuda_idx=6, ref='y', zero_2nd_grad=False),
29
+ 'selu': EasyDict(func=lambda x, **_: tf.nn.selu(x), def_alpha=None, def_gain=1.0, cuda_idx=7, ref='y', zero_2nd_grad=False),
30
+ 'softplus': EasyDict(func=lambda x, **_: tf.nn.softplus(x), def_alpha=None, def_gain=1.0, cuda_idx=8, ref='y', zero_2nd_grad=False),
31
+ 'swish': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x) * x, def_alpha=None, def_gain=np.sqrt(2), cuda_idx=9, ref='x', zero_2nd_grad=False),
32
+ }
33
+
34
+ #----------------------------------------------------------------------------
35
+
36
+ def fused_bias_act(x, b=None, axis=1, act='linear', alpha=None, gain=None, impl='cuda'):
37
+ r"""Fused bias and activation function.
38
+
39
+ Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
40
+ and scales the result by `gain`. Each of the steps is optional. In most cases,
41
+ the fused op is considerably more efficient than performing the same calculation
42
+ using standard TensorFlow ops. It supports first and second order gradients,
43
+ but not third order gradients.
44
+
45
+ Args:
46
+ x: Input activation tensor. Can have any shape, but if `b` is defined, the
47
+ dimension corresponding to `axis`, as well as the rank, must be known.
48
+ b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
49
+ as `x`. The shape must be known, and it must match the dimension of `x`
50
+ corresponding to `axis`.
51
+ axis: The dimension in `x` corresponding to the elements of `b`.
52
+ The value of `axis` is ignored if `b` is not specified.
53
+ act: Name of the activation function to evaluate, or `"linear"` to disable.
54
+ Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
55
+ See `activation_funcs` for a full list. `None` is not allowed.
56
+ alpha: Shape parameter for the activation function, or `None` to use the default.
57
+ gain: Scaling factor for the output tensor, or `None` to use the default.
58
+ See `activation_funcs` for the default scaling of each activation function.
59
+ If unsure, consider specifying `1.0`.
60
+ impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
61
+
62
+ Returns:
63
+ Tensor of the same shape and datatype as `x`.
64
+ """
65
+
66
+ impl_dict = {
67
+ 'ref': _fused_bias_act_ref,
68
+ 'cuda': _fused_bias_act_cuda,
69
+ }
70
+ return impl_dict[impl](x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)
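For illustration, a call that adds a per-channel bias to an NCHW feature map and applies leaky ReLU, using the slow `'ref'` path so no compiled CUDA plugin is needed. A sketch that assumes the TF 1.x graph-mode environment this codebase targets; the shapes are invented:

```python
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(2, 64, 32, 32), dtype=tf.float32)  # NCHW
b = tf.zeros([64])  # one bias per channel; must match x.shape[axis] with axis=1
y = fused_bias_act(x, b=b, axis=1, act='lrelu', impl='ref')
# Defaults from activation_funcs: alpha=0.2, gain=sqrt(2), i.e.
# y = sqrt(2) * leaky_relu(x + b[None, :, None, None], 0.2)
```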
71
+
72
+ #----------------------------------------------------------------------------
73
+
74
+ def _fused_bias_act_ref(x, b, axis, act, alpha, gain):
75
+ """Slow reference implementation of `fused_bias_act()` using standard TensorFlow ops."""
76
+
77
+ # Validate arguments.
78
+ x = tf.convert_to_tensor(x)
79
+ b = tf.convert_to_tensor(b) if b is not None else tf.constant([], dtype=x.dtype)
80
+ act_spec = activation_funcs[act]
81
+ assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])
82
+ assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
83
+ if alpha is None:
84
+ alpha = act_spec.def_alpha
85
+ if gain is None:
86
+ gain = act_spec.def_gain
87
+
88
+ # Add bias.
89
+ if b.shape[0] != 0:
90
+ x += tf.reshape(b, [-1 if i == axis else 1 for i in range(x.shape.rank)])
91
+
92
+ # Evaluate activation function.
93
+ x = act_spec.func(x, alpha=alpha)
94
+
95
+ # Scale by gain.
96
+ if gain != 1:
97
+ x *= gain
98
+ return x
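The reshape above is what makes `axis` work for any rank: the bias is bent into a shape that is 1 everywhere except at `axis`, so broadcasting adds it along that one dimension. A NumPy equivalent for rank-4 NCHW input:

```python
import numpy as np

x = np.zeros((2, 3, 4, 4), dtype=np.float32)
b = np.array([1.0, 2.0, 3.0], dtype=np.float32)
axis = 1
shape = [-1 if i == axis else 1 for i in range(x.ndim)]  # [1, -1, 1, 1]
y = x + b.reshape(shape)  # b broadcasts across N, H, W
assert (y[:, 2] == 3.0).all()
```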
99
+
100
+ #----------------------------------------------------------------------------
101
+
102
+ def _fused_bias_act_cuda(x, b, axis, act, alpha, gain):
103
+ """Fast CUDA implementation of `fused_bias_act()` using custom ops."""
104
+
105
+ # Validate arguments.
106
+ x = tf.convert_to_tensor(x)
107
+ empty_tensor = tf.constant([], dtype=x.dtype)
108
+ b = tf.convert_to_tensor(b) if b is not None else empty_tensor
109
+ act_spec = activation_funcs[act]
110
+ assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])
111
+ assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
112
+ if alpha is None:
113
+ alpha = act_spec.def_alpha
114
+ if gain is None:
115
+ gain = act_spec.def_gain
116
+
117
+ # Special cases.
118
+ if act == 'linear' and b.shape[0] == 0 and gain == 1.0:  # b was converted above, so test for the empty tensor rather than None
119
+ return x
120
+ if act_spec.cuda_idx is None:
121
+ return _fused_bias_act_ref(x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)
122
+
123
+ # CUDA kernel.
124
+ cuda_kernel = _get_plugin().fused_bias_act
125
+ cuda_kwargs = dict(axis=axis, act=act_spec.cuda_idx, alpha=alpha, gain=gain)
126
+
127
+ # Forward pass: y = func(x, b).
128
+ def func_y(x, b):
129
+ y = cuda_kernel(x=x, b=b, ref=empty_tensor, grad=0, **cuda_kwargs)
130
+ y.set_shape(x.shape)
131
+ return y
132
+
133
+ # Backward pass: dx, db = grad(dy, x, y)
134
+ def grad_dx(dy, x, y):
135
+ ref = {'x': x, 'y': y}[act_spec.ref]
136
+ dx = cuda_kernel(x=dy, b=empty_tensor, ref=ref, grad=1, **cuda_kwargs)
137
+ dx.set_shape(x.shape)
138
+ return dx
139
+ def grad_db(dx):
140
+ if b.shape[0] == 0:
141
+ return empty_tensor
142
+ db = dx
143
+ if axis < x.shape.rank - 1:
144
+ db = tf.reduce_sum(db, list(range(axis + 1, x.shape.rank)))
145
+ if axis > 0:
146
+ db = tf.reduce_sum(db, list(range(axis)))
147
+ db.set_shape(b.shape)
148
+ return db
149
+
150
+ # Second order gradients: d_dy, d_x = grad2(d_dx, d_db, x, y)
151
+ def grad2_d_dy(d_dx, d_db, x, y):
152
+ ref = {'x': x, 'y': y}[act_spec.ref]
153
+ d_dy = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=1, **cuda_kwargs)
154
+ d_dy.set_shape(x.shape)
155
+ return d_dy
156
+ def grad2_d_x(d_dx, d_db, x, y):
157
+ ref = {'x': x, 'y': y}[act_spec.ref]
158
+ d_x = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=2, **cuda_kwargs)
159
+ d_x.set_shape(x.shape)
160
+ return d_x
161
+
162
+ # Fast version for piecewise-linear activation funcs.
163
+ @tf.custom_gradient
164
+ def func_zero_2nd_grad(x, b):
165
+ y = func_y(x, b)
166
+ @tf.custom_gradient
167
+ def grad(dy):
168
+ dx = grad_dx(dy, x, y)
169
+ db = grad_db(dx)
170
+ def grad2(d_dx, d_db):
171
+ d_dy = grad2_d_dy(d_dx, d_db, x, y)
172
+ return d_dy
173
+ return (dx, db), grad2
174
+ return y, grad
175
+
176
+ # Slow version for general activation funcs.
177
+ @tf.custom_gradient
178
+ def func_nonzero_2nd_grad(x, b):
179
+ y = func_y(x, b)
180
+ def grad_wrap(dy):
181
+ @tf.custom_gradient
182
+ def grad_impl(dy, x):
183
+ dx = grad_dx(dy, x, y)
184
+ db = grad_db(dx)
185
+ def grad2(d_dx, d_db):
186
+ d_dy = grad2_d_dy(d_dx, d_db, x, y)
187
+ d_x = grad2_d_x(d_dx, d_db, x, y)
188
+ return d_dy, d_x
189
+ return (dx, db), grad2
190
+ return grad_impl(dy, x)
191
+ return y, grad_wrap
192
+
193
+ # Which version to use?
194
+ if act_spec.zero_2nd_grad:
195
+ return func_zero_2nd_grad(x, b)
196
+ return func_nonzero_2nd_grad(x, b)
197
+
198
+ #----------------------------------------------------------------------------
stylegan_human/dnnlib/tflib/ops/upfirdn_2d.cu ADDED
@@ -0,0 +1,328 @@
1
+ // Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ // Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
+ //
5
+ // This work is made available under the Nvidia Source Code License-NC.
6
+ // To view a copy of this license, visit
7
+ // https://nvlabs.github.io/stylegan2/license.html
8
+
9
+ #define EIGEN_USE_GPU
10
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
11
+ #include "tensorflow/core/framework/op.h"
12
+ #include "tensorflow/core/framework/op_kernel.h"
13
+ #include "tensorflow/core/framework/shape_inference.h"
14
+ #include <stdio.h>
15
+
16
+ using namespace tensorflow;
17
+ using namespace tensorflow::shape_inference;
18
+
19
+ //------------------------------------------------------------------------
20
+ // Helpers.
21
+
22
+ #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal(cudaGetErrorName(err))); } while (false)
23
+
24
+ static __host__ __device__ __forceinline__ int floorDiv(int a, int b)
25
+ {
26
+ int c = a / b;
27
+ if (c * b > a)
28
+ c--;
29
+ return c;
30
+ }
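`floorDiv` rounds toward negative infinity, unlike C's `/`, which truncates toward zero; this matters because the receptive-field arithmetic below can produce negative intermediate coordinates. Python's `//` already floors, which makes the behavior easy to spot-check:

```python
def floor_div(a, b):
    return a // b  # Python floors natively; the CUDA helper patches up C truncation

assert floor_div(-3, 2) == -2  # C-style truncation would give -1
assert floor_div(3, 2) == 1
```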
31
+
32
+ //------------------------------------------------------------------------
33
+ // CUDA kernel params.
34
+
35
+ template <class T>
36
+ struct UpFirDn2DKernelParams
37
+ {
38
+ const T* x; // [majorDim, inH, inW, minorDim]
39
+ const T* k; // [kernelH, kernelW]
40
+ T* y; // [majorDim, outH, outW, minorDim]
41
+
42
+ int upx;
43
+ int upy;
44
+ int downx;
45
+ int downy;
46
+ int padx0;
47
+ int padx1;
48
+ int pady0;
49
+ int pady1;
50
+
51
+ int majorDim;
52
+ int inH;
53
+ int inW;
54
+ int minorDim;
55
+ int kernelH;
56
+ int kernelW;
57
+ int outH;
58
+ int outW;
59
+ int loopMajor;
60
+ int loopX;
61
+ };
62
+
63
+ //------------------------------------------------------------------------
64
+ // General CUDA implementation for large filter kernels.
65
+
66
+ template <class T>
67
+ static __global__ void UpFirDn2DKernel_large(const UpFirDn2DKernelParams<T> p)
68
+ {
69
+ // Calculate thread index.
70
+ int minorIdx = blockIdx.x * blockDim.x + threadIdx.x;
71
+ int outY = minorIdx / p.minorDim;
72
+ minorIdx -= outY * p.minorDim;
73
+ int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y;
74
+ int majorIdxBase = blockIdx.z * p.loopMajor;
75
+ if (outXBase >= p.outW || outY >= p.outH || majorIdxBase >= p.majorDim)
76
+ return;
77
+
78
+ // Setup Y receptive field.
79
+ int midY = outY * p.downy + p.upy - 1 - p.pady0;
80
+ int inY = min(max(floorDiv(midY, p.upy), 0), p.inH);
81
+ int h = min(max(floorDiv(midY + p.kernelH, p.upy), 0), p.inH) - inY;
82
+ int kernelY = midY + p.kernelH - (inY + 1) * p.upy;
83
+
84
+ // Loop over majorDim and outX.
85
+ for (int loopMajor = 0, majorIdx = majorIdxBase; loopMajor < p.loopMajor && majorIdx < p.majorDim; loopMajor++, majorIdx++)
86
+ for (int loopX = 0, outX = outXBase; loopX < p.loopX && outX < p.outW; loopX++, outX += blockDim.y)
87
+ {
88
+ // Setup X receptive field.
89
+ int midX = outX * p.downx + p.upx - 1 - p.padx0;
90
+ int inX = min(max(floorDiv(midX, p.upx), 0), p.inW);
91
+ int w = min(max(floorDiv(midX + p.kernelW, p.upx), 0), p.inW) - inX;
92
+ int kernelX = midX + p.kernelW - (inX + 1) * p.upx;
93
+
94
+ // Initialize pointers.
95
+ const T* xp = &p.x[((majorIdx * p.inH + inY) * p.inW + inX) * p.minorDim + minorIdx];
96
+ const T* kp = &p.k[kernelY * p.kernelW + kernelX];
97
+ int xpx = p.minorDim;
98
+ int kpx = -p.upx;
99
+ int xpy = p.inW * p.minorDim;
100
+ int kpy = -p.upy * p.kernelW;
101
+
102
+ // Inner loop.
103
+ float v = 0.0f;
104
+ for (int y = 0; y < h; y++)
105
+ {
106
+ for (int x = 0; x < w; x++)
107
+ {
108
+ v += (float)(*xp) * (float)(*kp);
109
+ xp += xpx;
110
+ kp += kpx;
111
+ }
112
+ xp += xpy - w * xpx;
113
+ kp += kpy - w * kpx;
114
+ }
115
+
116
+ // Store result.
117
+ p.y[((majorIdx * p.outH + outY) * p.outW + outX) * p.minorDim + minorIdx] = (T)v;
118
+ }
119
+ }
120
+
121
+ //------------------------------------------------------------------------
122
+ // Specialized CUDA implementation for small filter kernels.
123
+
124
+ template <class T, int upx, int upy, int downx, int downy, int kernelW, int kernelH, int tileOutW, int tileOutH>
125
+ static __global__ void UpFirDn2DKernel_small(const UpFirDn2DKernelParams<T> p)
126
+ {
127
+ //assert(kernelW % upx == 0);
128
+ //assert(kernelH % upy == 0);
129
+ const int tileInW = ((tileOutW - 1) * downx + kernelW - 1) / upx + 1;
130
+ const int tileInH = ((tileOutH - 1) * downy + kernelH - 1) / upy + 1;
131
+ __shared__ volatile float sk[kernelH][kernelW];
132
+ __shared__ volatile float sx[tileInH][tileInW];
133
+
134
+ // Calculate tile index.
135
+ int minorIdx = blockIdx.x;
136
+ int tileOutY = minorIdx / p.minorDim;
137
+ minorIdx -= tileOutY * p.minorDim;
138
+ tileOutY *= tileOutH;
139
+ int tileOutXBase = blockIdx.y * p.loopX * tileOutW;
140
+ int majorIdxBase = blockIdx.z * p.loopMajor;
141
+ if (tileOutXBase >= p.outW | tileOutY >= p.outH | majorIdxBase >= p.majorDim)
142
+ return;
143
+
144
+ // Load filter kernel (flipped).
145
+ for (int tapIdx = threadIdx.x; tapIdx < kernelH * kernelW; tapIdx += blockDim.x)
146
+ {
147
+ int ky = tapIdx / kernelW;
148
+ int kx = tapIdx - ky * kernelW;
149
+ float v = 0.0f;
150
+ if (kx < p.kernelW & ky < p.kernelH)
151
+ v = (float)p.k[(p.kernelH - 1 - ky) * p.kernelW + (p.kernelW - 1 - kx)];
152
+ sk[ky][kx] = v;
153
+ }
154
+
155
+ // Loop over majorDim and outX.
156
+ for (int loopMajor = 0, majorIdx = majorIdxBase; loopMajor < p.loopMajor & majorIdx < p.majorDim; loopMajor++, majorIdx++)
157
+ for (int loopX = 0, tileOutX = tileOutXBase; loopX < p.loopX & tileOutX < p.outW; loopX++, tileOutX += tileOutW)
158
+ {
159
+ // Load input pixels.
160
+ int tileMidX = tileOutX * downx + upx - 1 - p.padx0;
161
+ int tileMidY = tileOutY * downy + upy - 1 - p.pady0;
162
+ int tileInX = floorDiv(tileMidX, upx);
163
+ int tileInY = floorDiv(tileMidY, upy);
164
+ __syncthreads();
165
+ for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW; inIdx += blockDim.x)
166
+ {
167
+ int relInY = inIdx / tileInW;
168
+ int relInX = inIdx - relInY * tileInW;
169
+ int inX = relInX + tileInX;
170
+ int inY = relInY + tileInY;
171
+ float v = 0.0f;
172
+ if (inX >= 0 & inY >= 0 & inX < p.inW & inY < p.inH)
173
+ v = (float)p.x[((majorIdx * p.inH + inY) * p.inW + inX) * p.minorDim + minorIdx];
174
+ sx[relInY][relInX] = v;
175
+ }
176
+
177
+ // Loop over output pixels.
178
+ __syncthreads();
179
+ for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW; outIdx += blockDim.x)
180
+ {
181
+ int relOutY = outIdx / tileOutW;
182
+ int relOutX = outIdx - relOutY * tileOutW;
183
+ int outX = relOutX + tileOutX;
184
+ int outY = relOutY + tileOutY;
185
+
186
+ // Setup receptive field.
187
+ int midX = tileMidX + relOutX * downx;
188
+ int midY = tileMidY + relOutY * downy;
189
+ int inX = floorDiv(midX, upx);
190
+ int inY = floorDiv(midY, upy);
191
+ int relInX = inX - tileInX;
192
+ int relInY = inY - tileInY;
193
+ int kernelX = (inX + 1) * upx - midX - 1; // flipped
194
+ int kernelY = (inY + 1) * upy - midY - 1; // flipped
195
+
196
+ // Inner loop.
197
+ float v = 0.0f;
198
+ #pragma unroll
199
+ for (int y = 0; y < kernelH / upy; y++)
200
+ #pragma unroll
201
+ for (int x = 0; x < kernelW / upx; x++)
202
+ v += sx[relInY + y][relInX + x] * sk[kernelY + y * upy][kernelX + x * upx];
203
+
204
+ // Store result.
205
+ if (outX < p.outW & outY < p.outH)
206
+ p.y[((majorIdx * p.outH + outY) * p.outW + outX) * p.minorDim + minorIdx] = (T)v;
207
+ }
208
+ }
209
+ }
210
+
211
+ //------------------------------------------------------------------------
212
+ // TensorFlow op.
213
+
214
+ template <class T>
215
+ struct UpFirDn2DOp : public OpKernel
216
+ {
217
+ UpFirDn2DKernelParams<T> m_attribs;
218
+
219
+ UpFirDn2DOp(OpKernelConstruction* ctx) : OpKernel(ctx)
220
+ {
221
+ memset(&m_attribs, 0, sizeof(m_attribs));
222
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("upx", &m_attribs.upx));
223
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("upy", &m_attribs.upy));
224
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("downx", &m_attribs.downx));
225
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("downy", &m_attribs.downy));
226
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("padx0", &m_attribs.padx0));
227
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("padx1", &m_attribs.padx1));
228
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("pady0", &m_attribs.pady0));
229
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("pady1", &m_attribs.pady1));
230
+ OP_REQUIRES(ctx, m_attribs.upx >= 1 && m_attribs.upy >= 1, errors::InvalidArgument("upx and upy must be at least 1x1"));
231
+ OP_REQUIRES(ctx, m_attribs.downx >= 1 && m_attribs.downy >= 1, errors::InvalidArgument("downx and downy must be at least 1x1"));
232
+ }
233
+
234
+ void Compute(OpKernelContext* ctx)
235
+ {
236
+ UpFirDn2DKernelParams<T> p = m_attribs;
237
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
238
+
239
+ const Tensor& x = ctx->input(0); // [majorDim, inH, inW, minorDim]
240
+ const Tensor& k = ctx->input(1); // [kernelH, kernelW]
241
+ p.x = x.flat<T>().data();
242
+ p.k = k.flat<T>().data();
243
+ OP_REQUIRES(ctx, x.dims() == 4, errors::InvalidArgument("input must have rank 4"));
244
+ OP_REQUIRES(ctx, k.dims() == 2, errors::InvalidArgument("kernel must have rank 2"));
245
+ OP_REQUIRES(ctx, x.NumElements() <= kint32max, errors::InvalidArgument("input too large"));
246
+ OP_REQUIRES(ctx, k.NumElements() <= kint32max, errors::InvalidArgument("kernel too large"));
247
+
248
+ p.majorDim = (int)x.dim_size(0);
249
+ p.inH = (int)x.dim_size(1);
250
+ p.inW = (int)x.dim_size(2);
251
+ p.minorDim = (int)x.dim_size(3);
252
+ p.kernelH = (int)k.dim_size(0);
253
+ p.kernelW = (int)k.dim_size(1);
254
+ OP_REQUIRES(ctx, p.kernelW >= 1 && p.kernelH >= 1, errors::InvalidArgument("kernel must be at least 1x1"));
255
+
256
+ p.outW = (p.inW * p.upx + p.padx0 + p.padx1 - p.kernelW + p.downx) / p.downx;
257
+ p.outH = (p.inH * p.upy + p.pady0 + p.pady1 - p.kernelH + p.downy) / p.downy;
258
+ OP_REQUIRES(ctx, p.outW >= 1 && p.outH >= 1, errors::InvalidArgument("output must be at least 1x1"));
259
+
260
+ Tensor* y = NULL; // [majorDim, outH, outW, minorDim]
261
+ TensorShape ys;
262
+ ys.AddDim(p.majorDim);
263
+ ys.AddDim(p.outH);
264
+ ys.AddDim(p.outW);
265
+ ys.AddDim(p.minorDim);
266
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, ys, &y));
267
+ p.y = y->flat<T>().data();
268
+ OP_REQUIRES(ctx, y->NumElements() <= kint32max, errors::InvalidArgument("output too large"));
269
+
270
+ // Choose CUDA kernel to use.
271
+ void* cudaKernel = (void*)UpFirDn2DKernel_large<T>;
272
+ int tileOutW = -1;
273
+ int tileOutH = -1;
274
+         if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 7 && p.kernelH <= 7) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 7,7, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 6 && p.kernelH <= 6) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 6,6, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 5 && p.kernelH <= 5) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 5,5, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 4 && p.kernelH <= 4) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 4,4, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 1 && p.downy == 1 && p.kernelW <= 3 && p.kernelH <= 3) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 1,1, 3,3, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 8 && p.kernelH <= 8) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 8,8, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 6 && p.kernelH <= 6) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 6,6, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 4 && p.kernelH <= 4) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 4,4, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 2 && p.upy == 2 && p.downx == 1 && p.downy == 1 && p.kernelW <= 2 && p.kernelH <= 2) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 2,2, 1,1, 2,2, 64,16>; tileOutW = 64; tileOutH = 16; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 8 && p.kernelH <= 8) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 8,8, 32,8>; tileOutW = 32; tileOutH = 8; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 6 && p.kernelH <= 6) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 6,6, 32,8>; tileOutW = 32; tileOutH = 8; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 4 && p.kernelH <= 4) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 4,4, 32,8>; tileOutW = 32; tileOutH = 8; }
+         if (p.upx == 1 && p.upy == 1 && p.downx == 2 && p.downy == 2 && p.kernelW <= 2 && p.kernelH <= 2) { cudaKernel = (void*)UpFirDn2DKernel_small<T, 1,1, 2,2, 2,2, 32,8>; tileOutW = 32; tileOutH = 8; }
+
+         // Choose launch params.
+         dim3 blockSize;
+         dim3 gridSize;
+         if (tileOutW > 0 && tileOutH > 0) // small
+         {
+             p.loopMajor = (p.majorDim - 1) / 16384 + 1;
+             p.loopX = 1;
+             blockSize = dim3(32 * 8, 1, 1);
+             gridSize = dim3(((p.outH - 1) / tileOutH + 1) * p.minorDim, (p.outW - 1) / (p.loopX * tileOutW) + 1, (p.majorDim - 1) / p.loopMajor + 1);
+         }
+         else // large
+         {
+             p.loopMajor = (p.majorDim - 1) / 16384 + 1;
+             p.loopX = 4;
+             blockSize = dim3(4, 32, 1);
+             gridSize = dim3((p.outH * p.minorDim - 1) / blockSize.x + 1, (p.outW - 1) / (p.loopX * blockSize.y) + 1, (p.majorDim - 1) / p.loopMajor + 1);
+         }
+
+         // Launch CUDA kernel.
+         void* args[] = {&p};
+         OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(cudaKernel, gridSize, blockSize, args, 0, stream));
+     }
+ };
+
+ REGISTER_OP("UpFirDn2D")
+     .Input      ("x: T")
+     .Input      ("k: T")
+     .Output     ("y: T")
+     .Attr       ("T: {float, half}")
+     .Attr       ("upx: int = 1")
+     .Attr       ("upy: int = 1")
+     .Attr       ("downx: int = 1")
+     .Attr       ("downy: int = 1")
+     .Attr       ("padx0: int = 0")
+     .Attr       ("padx1: int = 0")
+     .Attr       ("pady0: int = 0")
+     .Attr       ("pady1: int = 0");
+ REGISTER_KERNEL_BUILDER(Name("UpFirDn2D").Device(DEVICE_GPU).TypeConstraint<float>("T"), UpFirDn2DOp<float>);
+ REGISTER_KERNEL_BUILDER(Name("UpFirDn2D").Device(DEVICE_GPU).TypeConstraint<Eigen::half>("T"), UpFirDn2DOp<Eigen::half>);
+
+ //------------------------------------------------------------------------
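Note on the dispatch above: the `small` branch assigns one thread block per `tileOutW x tileOutH` output tile and caps the grid's z extent at 16384 major slices via `loopMajor`. A minimal Python sketch of that launch-parameter arithmetic, mirrored outside CUDA (illustrative only, not part of the commit; the example sizes are made up):

```python
# Sketch of the 'small' launch-parameter math from upfirdn_2d.cu (assumed
# equivalent, for illustration only). tile_out_w/tile_out_h mirror the
# tileOutW/tileOutH template parameters chosen by the if-ladder above.
def launch_params_small(out_h, out_w, major_dim, minor_dim,
                        tile_out_w=64, tile_out_h=16):
    loop_major = (major_dim - 1) // 16384 + 1   # limit grid z to 16384 slices
    loop_x = 1                                  # 'small' path: no x looping
    block_size = (32 * 8, 1, 1)                 # 256 threads per block
    grid_size = (
        ((out_h - 1) // tile_out_h + 1) * minor_dim,      # tile rows x minorDim
        (out_w - 1) // (loop_x * tile_out_w) + 1,         # tile columns
        (major_dim - 1) // loop_major + 1,                # batch slices
    )
    return block_size, grid_size

# e.g. a 256x256 output plane, batch of 8, single minor dimension:
print(launch_params_small(256, 256, major_dim=8, minor_dim=1))
# -> ((256, 1, 1), (16, 4, 8))
```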
stylegan_human/dnnlib/tflib/ops/upfirdn_2d.py ADDED
@@ -0,0 +1,366 @@
+ # Copyright (c) SenseTime Research. All rights reserved.
+
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+ #
+ # This work is made available under the Nvidia Source Code License-NC.
+ # To view a copy of this license, visit
+ # https://nvlabs.github.io/stylegan2/license.html
+
+ """Custom TensorFlow ops for efficient resampling of 2D images."""
+
+ import os
+ import numpy as np
+ import tensorflow as tf
+ from .. import custom_ops
+
+ def _get_plugin():
+     return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')
+
+ #----------------------------------------------------------------------------
+
+ def upfirdn_2d(x, k, upx=1, upy=1, downx=1, downy=1, padx0=0, padx1=0, pady0=0, pady1=0, impl='cuda'):
+     r"""Pad, upsample, FIR filter, and downsample a batch of 2D images.
+
+     Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`
+     and performs the following operations for each image, batched across
+     `majorDim` and `minorDim`:
+
+     1. Pad the image with zeros by the specified number of pixels on each side
+        (`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value
+        corresponds to cropping the image.
+
+     2. Upsample the image by inserting zeros after each pixel (`upx`, `upy`).
+
+     3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the
+        image so that the footprint of all output pixels lies within the input image.
+
+     4. Downsample the image by throwing away pixels (`downx`, `downy`).
+
+     This sequence of operations bears close resemblance to scipy.signal.upfirdn().
+     The fused op is considerably more efficient than performing the same calculation
+     using standard TensorFlow ops. It supports gradients of arbitrary order.
+
+     Args:
+         x:      Input tensor of the shape `[majorDim, inH, inW, minorDim]`.
+         k:      2D FIR filter of the shape `[firH, firW]`.
+         upx:    Integer upsampling factor along the X-axis (default: 1).
+         upy:    Integer upsampling factor along the Y-axis (default: 1).
+         downx:  Integer downsampling factor along the X-axis (default: 1).
+         downy:  Integer downsampling factor along the Y-axis (default: 1).
+         padx0:  Number of pixels to pad on the left side (default: 0).
+         padx1:  Number of pixels to pad on the right side (default: 0).
+         pady0:  Number of pixels to pad on the top side (default: 0).
+         pady1:  Number of pixels to pad on the bottom side (default: 0).
+         impl:   Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
+
+     Returns:
+         Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`.
+     """
+
+     impl_dict = {
+         'ref':  _upfirdn_2d_ref,
+         'cuda': _upfirdn_2d_cuda,
+     }
+     return impl_dict[impl](x=x, k=k, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)
+
+ #----------------------------------------------------------------------------
+
+ def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
+     """Slow reference implementation of `upfirdn_2d()` using standard TensorFlow ops."""
+
+     x = tf.convert_to_tensor(x)
+     k = np.asarray(k, dtype=np.float32)
+     assert x.shape.rank == 4
+     inH = x.shape[1].value
+     inW = x.shape[2].value
+     minorDim = _shape(x, 3)
+     kernelH, kernelW = k.shape
+     assert inW >= 1 and inH >= 1
+     assert kernelW >= 1 and kernelH >= 1
+     assert isinstance(upx, int) and isinstance(upy, int)
+     assert isinstance(downx, int) and isinstance(downy, int)
+     assert isinstance(padx0, int) and isinstance(padx1, int)
+     assert isinstance(pady0, int) and isinstance(pady1, int)
+
+     # Upsample (insert zeros).
+     x = tf.reshape(x, [-1, inH, 1, inW, 1, minorDim])
+     x = tf.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])
+     x = tf.reshape(x, [-1, inH * upy, inW * upx, minorDim])
+
+     # Pad (crop if negative).
+     x = tf.pad(x, [[0, 0], [max(pady0, 0), max(pady1, 0)], [max(padx0, 0), max(padx1, 0)], [0, 0]])
+     x = x[:, max(-pady0, 0) : x.shape[1].value - max(-pady1, 0), max(-padx0, 0) : x.shape[2].value - max(-padx1, 0), :]
+
+     # Convolve with filter.
+     x = tf.transpose(x, [0, 3, 1, 2])
+     x = tf.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])
+     w = tf.constant(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype)
+     x = tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NCHW')
+     x = tf.reshape(x, [-1, minorDim, inH * upy + pady0 + pady1 - kernelH + 1, inW * upx + padx0 + padx1 - kernelW + 1])
+     x = tf.transpose(x, [0, 2, 3, 1])
+
+     # Downsample (throw away pixels).
+     return x[:, ::downy, ::downx, :]
+
+ #----------------------------------------------------------------------------
+
+ def _upfirdn_2d_cuda(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
+     """Fast CUDA implementation of `upfirdn_2d()` using custom ops."""
+
+     x = tf.convert_to_tensor(x)
+     k = np.asarray(k, dtype=np.float32)
+     majorDim, inH, inW, minorDim = x.shape.as_list()
+     kernelH, kernelW = k.shape
+     assert inW >= 1 and inH >= 1
+     assert kernelW >= 1 and kernelH >= 1
+     assert isinstance(upx, int) and isinstance(upy, int)
+     assert isinstance(downx, int) and isinstance(downy, int)
+     assert isinstance(padx0, int) and isinstance(padx1, int)
+     assert isinstance(pady0, int) and isinstance(pady1, int)
+
+     outW = (inW * upx + padx0 + padx1 - kernelW) // downx + 1
+     outH = (inH * upy + pady0 + pady1 - kernelH) // downy + 1
+     assert outW >= 1 and outH >= 1
+
+     kc = tf.constant(k, dtype=x.dtype)
+     gkc = tf.constant(k[::-1, ::-1], dtype=x.dtype)
+     gpadx0 = kernelW - padx0 - 1
+     gpady0 = kernelH - pady0 - 1
+     gpadx1 = inW * upx - outW * downx + padx0 - upx + 1
+     gpady1 = inH * upy - outH * downy + pady0 - upy + 1
+
+     @tf.custom_gradient
+     def func(x):
+         y = _get_plugin().up_fir_dn2d(x=x, k=kc, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)
+         y.set_shape([majorDim, outH, outW, minorDim])
+         @tf.custom_gradient
+         def grad(dy):
+             dx = _get_plugin().up_fir_dn2d(x=dy, k=gkc, upx=downx, upy=downy, downx=upx, downy=upy, padx0=gpadx0, padx1=gpadx1, pady0=gpady0, pady1=gpady1)
+             dx.set_shape([majorDim, inH, inW, minorDim])
+             return dx, func
+         return y, grad
+     return func(x)
+
+ #----------------------------------------------------------------------------
+
+ def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):
+     r"""Filter a batch of 2D images with the given FIR filter.
+
+     Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
+     and filters each image with the given filter. The filter is normalized so that
+     if the input pixels are constant, they will be scaled by the specified `gain`.
+     Pixels outside the image are assumed to be zero.
+
+     Args:
+         x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
+         k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
+         gain:         Scaling factor for signal magnitude (default: 1.0).
+         data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
+         impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
+
+     Returns:
+         Tensor of the same shape and datatype as `x`.
+     """
+
+     k = _setup_kernel(k) * gain
+     p = k.shape[0] - 1
+     return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
+
+ #----------------------------------------------------------------------------
+
+ def upsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
+     r"""Upsample a batch of 2D images with the given filter.
+
+     Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
+     and upsamples each image with the given filter. The filter is normalized so that
+     if the input pixels are constant, they will be scaled by the specified `gain`.
+     Pixels outside the image are assumed to be zero, and the filter is padded with
+     zeros so that its shape is a multiple of the upsampling factor.
+
+     Args:
+         x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
+         k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
+                       The default is `[1] * factor`, which corresponds to nearest-neighbor
+                       upsampling.
+         factor:       Integer upsampling factor (default: 2).
+         gain:         Scaling factor for signal magnitude (default: 1.0).
+         data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
+         impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
+
+     Returns:
+         Tensor of the shape `[N, C, H * factor, W * factor]` or
+         `[N, H * factor, W * factor, C]`, and same datatype as `x`.
+     """
+
+     assert isinstance(factor, int) and factor >= 1
+     if k is None:
+         k = [1] * factor
+     k = _setup_kernel(k) * (gain * (factor ** 2))
+     p = k.shape[0] - factor
+     return _simple_upfirdn_2d(x, k, up=factor, pad0=(p+1)//2+factor-1, pad1=p//2, data_format=data_format, impl=impl)
+
+ #----------------------------------------------------------------------------
+
+ def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
+     r"""Downsample a batch of 2D images with the given filter.
+
+     Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
+     and downsamples each image with the given filter. The filter is normalized so that
+     if the input pixels are constant, they will be scaled by the specified `gain`.
+     Pixels outside the image are assumed to be zero, and the filter is padded with
+     zeros so that its shape is a multiple of the downsampling factor.
+
+     Args:
+         x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
+         k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
+                       The default is `[1] * factor`, which corresponds to average pooling.
+         factor:       Integer downsampling factor (default: 2).
+         gain:         Scaling factor for signal magnitude (default: 1.0).
+         data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
+         impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
+
+     Returns:
+         Tensor of the shape `[N, C, H // factor, W // factor]` or
+         `[N, H // factor, W // factor, C]`, and same datatype as `x`.
+     """
+
+     assert isinstance(factor, int) and factor >= 1
+     if k is None:
+         k = [1] * factor
+     k = _setup_kernel(k) * gain
+     p = k.shape[0] - factor
+     return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
+
+ #----------------------------------------------------------------------------
+
+ def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
+     r"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
+
+     Padding is performed only once at the beginning, not between the operations.
+     The fused op is considerably more efficient than performing the same calculation
+     using standard TensorFlow ops. It supports gradients of arbitrary order.
+
+     Args:
+         x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
+         w:            Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
+                       Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.
+         k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
+                       The default is `[1] * factor`, which corresponds to nearest-neighbor
+                       upsampling.
+         factor:       Integer upsampling factor (default: 2).
+         gain:         Scaling factor for signal magnitude (default: 1.0).
+         data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
+         impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
+
+     Returns:
+         Tensor of the shape `[N, C, H * factor, W * factor]` or
+         `[N, H * factor, W * factor, C]`, and same datatype as `x`.
+     """
+
+     assert isinstance(factor, int) and factor >= 1
+
+     # Check weight shape.
+     w = tf.convert_to_tensor(w)
+     assert w.shape.rank == 4
+     convH = w.shape[0].value
+     convW = w.shape[1].value
+     inC = _shape(w, 2)
+     outC = _shape(w, 3)
+     assert convW == convH
+
+     # Setup filter kernel.
+     if k is None:
+         k = [1] * factor
+     k = _setup_kernel(k) * (gain * (factor ** 2))
+     p = (k.shape[0] - factor) - (convW - 1)
+
+     # Determine data dimensions.
+     if data_format == 'NCHW':
+         stride = [1, 1, factor, factor]
+         output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW]
+         num_groups = _shape(x, 1) // inC
+     else:
+         stride = [1, factor, factor, 1]
+         output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC]
+         num_groups = _shape(x, 3) // inC
+
+     # Transpose weights.
+     w = tf.reshape(w, [convH, convW, inC, num_groups, -1])
+     w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])
+     w = tf.reshape(w, [convH, convW, -1, num_groups * inC])
+
+     # Execute.
+     x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)
+     return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl)
+
+ #----------------------------------------------------------------------------
+
+ def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
+     r"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
+
+     Padding is performed only once at the beginning, not between the operations.
+     The fused op is considerably more efficient than performing the same calculation
+     using standard TensorFlow ops. It supports gradients of arbitrary order.
+
+     Args:
+         x:            Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
+         w:            Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
+                       Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.
+         k:            FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
+                       The default is `[1] * factor`, which corresponds to average pooling.
+         factor:       Integer downsampling factor (default: 2).
+         gain:         Scaling factor for signal magnitude (default: 1.0).
+         data_format:  `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
+         impl:         Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
+
+     Returns:
+         Tensor of the shape `[N, C, H // factor, W // factor]` or
+         `[N, H // factor, W // factor, C]`, and same datatype as `x`.
+     """
+
+     assert isinstance(factor, int) and factor >= 1
+     w = tf.convert_to_tensor(w)
+     convH, convW, _inC, _outC = w.shape.as_list()
+     assert convW == convH
+     if k is None:
+         k = [1] * factor
+     k = _setup_kernel(k) * gain
+     p = (k.shape[0] - factor) + (convW - 1)
+     if data_format == 'NCHW':
+         s = [1, 1, factor, factor]
+     else:
+         s = [1, factor, factor, 1]
+     x = _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
+     return tf.nn.conv2d(x, w, strides=s, padding='VALID', data_format=data_format)
+
+ #----------------------------------------------------------------------------
+ # Internal helper funcs.
+
+ def _shape(tf_expr, dim_idx):
+     if tf_expr.shape.rank is not None:
+         dim = tf_expr.shape[dim_idx].value
+         if dim is not None:
+             return dim
+     return tf.shape(tf_expr)[dim_idx]
+
+ def _setup_kernel(k):
+     k = np.asarray(k, dtype=np.float32)
+     if k.ndim == 1:
+         k = np.outer(k, k)
+     k /= np.sum(k)
+     assert k.ndim == 2
+     assert k.shape[0] == k.shape[1]
+     return k
+
+ def _simple_upfirdn_2d(x, k, up=1, down=1, pad0=0, pad1=0, data_format='NCHW', impl='cuda'):
+     assert data_format in ['NCHW', 'NHWC']
+     assert x.shape.rank == 4
+     y = x
+     if data_format == 'NCHW':
+         y = tf.reshape(y, [-1, _shape(y, 2), _shape(y, 3), 1])
+     y = upfirdn_2d(y, k, upx=up, upy=up, downx=down, downy=down, padx0=pad0, padx1=pad1, pady0=pad0, pady1=pad1, impl=impl)
+     if data_format == 'NCHW':
+         y = tf.reshape(y, [-1, _shape(x, 1), _shape(y, 1), _shape(y, 2)])
+     return y
+
+ #----------------------------------------------------------------------------
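A hedged usage sketch of the ops defined above (illustrative, not part of the commit). It uses `impl='ref'`, so the `.cu` plugin need not be compiled; it assumes TensorFlow 1.x and that the repository root is on `sys.path`:

```python
import numpy as np
import tensorflow as tf
from stylegan_human.dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d

x = tf.constant(np.random.randn(1, 3, 8, 8).astype(np.float32))  # NCHW batch
k = [1, 3, 3, 1]  # separable binomial FIR filter, as used by StyleGAN2

y_up = upsample_2d(x, k, factor=2, impl='ref')      # -> [1, 3, 16, 16]
y_down = downsample_2d(x, k, factor=2, impl='ref')  # -> [1, 3, 4, 4]

with tf.Session() as sess:
    print(sess.run(y_up).shape, sess.run(y_down).shape)
```

Note the gain convention: `upsample_2d` multiplies the normalized kernel by `factor ** 2` so that a constant image keeps its magnitude after zero-insertion, while `downsample_2d` leaves the normalized kernel as-is.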
stylegan_human/dnnlib/tflib/optimizer.py ADDED
@@ -0,0 +1,338 @@
+ # Copyright (c) SenseTime Research. All rights reserved.
+
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+ #
+ # This work is made available under the Nvidia Source Code License-NC.
+ # To view a copy of this license, visit
+ # https://nvlabs.github.io/stylegan2/license.html
+
+ """Helper wrapper for a Tensorflow optimizer."""
+
+ import numpy as np
+ import tensorflow as tf
+
+ from collections import OrderedDict
+ from typing import List, Union
+
+ from . import autosummary
+ from . import tfutil
+ from .. import util
+
+ from .tfutil import TfExpression, TfExpressionEx
+
+ try:
+     # TensorFlow 1.13
+     from tensorflow.python.ops import nccl_ops
+ except:
+     # Older TensorFlow versions
+     import tensorflow.contrib.nccl as nccl_ops
+
+ class Optimizer:
+     """A Wrapper for tf.train.Optimizer.
+
+     Automatically takes care of:
+     - Gradient averaging for multi-GPU training.
+     - Gradient accumulation for arbitrarily large minibatches.
+     - Dynamic loss scaling and typecasts for FP16 training.
+     - Ignoring corrupted gradients that contain NaNs/Infs.
+     - Reporting statistics.
+     - Well-chosen default settings.
+     """
+
+     def __init__(self,
+         name:                 str            = "Train",                  # Name string that will appear in TensorFlow graph.
+         tf_optimizer:         str            = "tf.train.AdamOptimizer", # Underlying optimizer class.
+         learning_rate:        TfExpressionEx = 0.001,                    # Learning rate. Can vary over time.
+         minibatch_multiplier: TfExpressionEx = None,                     # Treat N consecutive minibatches as one by accumulating gradients.
+         share:                "Optimizer"    = None,                     # Share internal state with a previously created optimizer?
+         use_loss_scaling:     bool           = False,                    # Enable dynamic loss scaling for robust mixed-precision training?
+         loss_scaling_init:    float          = 64.0,                     # Log2 of initial loss scaling factor.
+         loss_scaling_inc:     float          = 0.0005,                   # Log2 of per-minibatch loss scaling increment when there is no overflow.
+         loss_scaling_dec:     float          = 1.0,                      # Log2 of per-minibatch loss scaling decrement when there is an overflow.
+         report_mem_usage:     bool           = False,                    # Report fine-grained memory usage statistics in TensorBoard?
+         **kwargs):
+
+         # Public fields.
+         self.name = name
+         self.learning_rate = learning_rate
+         self.minibatch_multiplier = minibatch_multiplier
+         self.id = self.name.replace("/", ".")
+         self.scope = tf.get_default_graph().unique_name(self.id)
+         self.optimizer_class = util.get_obj_by_name(tf_optimizer)
+         self.optimizer_kwargs = dict(kwargs)
+         self.use_loss_scaling = use_loss_scaling
+         self.loss_scaling_init = loss_scaling_init
+         self.loss_scaling_inc = loss_scaling_inc
+         self.loss_scaling_dec = loss_scaling_dec
+
+         # Private fields.
+         self._updates_applied = False
+         self._devices = OrderedDict()           # device_name => EasyDict()
+         self._shared_optimizers = OrderedDict() # device_name => optimizer_class
+         self._gradient_shapes = None            # [shape, ...]
+         self._report_mem_usage = report_mem_usage
+
+         # Validate arguments.
+         assert callable(self.optimizer_class)
+
+         # Share internal state if requested.
+         if share is not None:
+             assert isinstance(share, Optimizer)
+             assert self.optimizer_class is share.optimizer_class
+             assert self.learning_rate is share.learning_rate
+             assert self.optimizer_kwargs == share.optimizer_kwargs
+             self._shared_optimizers = share._shared_optimizers # pylint: disable=protected-access
+
+     def _get_device(self, device_name: str):
+         """Get internal state for the given TensorFlow device."""
+         tfutil.assert_tf_initialized()
+         if device_name in self._devices:
+             return self._devices[device_name]
+
+         # Initialize fields.
+         device = util.EasyDict()
+         device.name = device_name
+         device.optimizer = None              # Underlying optimizer: optimizer_class
+         device.loss_scaling_var = None       # Log2 of loss scaling: tf.Variable
+         device.grad_raw = OrderedDict()      # Raw gradients: var => [grad, ...]
+         device.grad_clean = OrderedDict()    # Clean gradients: var => grad
+         device.grad_acc_vars = OrderedDict() # Accumulation sums: var => tf.Variable
+         device.grad_acc_count = None         # Accumulation counter: tf.Variable
+         device.grad_acc = OrderedDict()      # Accumulated gradients: var => grad
+
+         # Setup TensorFlow objects.
+         with tfutil.absolute_name_scope(self.scope + "/Devices"), tf.device(device_name), tf.control_dependencies(None):
+             if device_name not in self._shared_optimizers:
+                 optimizer_name = self.scope.replace("/", "_") + "_opt%d" % len(self._shared_optimizers)
+                 self._shared_optimizers[device_name] = self.optimizer_class(name=optimizer_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
+             device.optimizer = self._shared_optimizers[device_name]
+             if self.use_loss_scaling:
+                 device.loss_scaling_var = tf.Variable(np.float32(self.loss_scaling_init), trainable=False, name="loss_scaling_var")
+
+         # Register device.
+         self._devices[device_name] = device
+         return device
+
+     def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
+         """Register the gradients of the given loss function with respect to the given variables.
+         Intended to be called once per GPU."""
+         tfutil.assert_tf_initialized()
+         assert not self._updates_applied
+         device = self._get_device(loss.device)
+
+         # Validate trainables.
+         if isinstance(trainable_vars, dict):
+             trainable_vars = list(trainable_vars.values())  # allow passing in Network.trainables as vars
+         assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
+         assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])
+         assert all(var.device == device.name for var in trainable_vars)
+
+         # Validate shapes.
+         if self._gradient_shapes is None:
+             self._gradient_shapes = [var.shape.as_list() for var in trainable_vars]
+         assert len(trainable_vars) == len(self._gradient_shapes)
+         assert all(var.shape.as_list() == var_shape for var, var_shape in zip(trainable_vars, self._gradient_shapes))
+
+         # Report memory usage if requested.
+         deps = []
+         if self._report_mem_usage:
+             self._report_mem_usage = False
+             try:
+                 with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]):
+                     deps.append(autosummary.autosummary(self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30))
+             except tf.errors.NotFoundError:
+                 pass
+
+         # Compute gradients.
+         with tf.name_scope(self.id + "_grad"), tf.device(device.name), tf.control_dependencies(deps):
+             loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
+             gate = tf.train.Optimizer.GATE_NONE  # disable gating to reduce memory usage
+             grad_list = device.optimizer.compute_gradients(loss=loss, var_list=trainable_vars, gate_gradients=gate)
+
+         # Register gradients.
+         for grad, var in grad_list:
+             if var not in device.grad_raw:
+                 device.grad_raw[var] = []
+             device.grad_raw[var].append(grad)
+
+     def apply_updates(self, allow_no_op: bool = False) -> tf.Operation:
+         """Construct training op to update the registered variables based on their gradients."""
+         tfutil.assert_tf_initialized()
+         assert not self._updates_applied
+         self._updates_applied = True
+         all_ops = []
+
+         # Check for no-op.
+         if allow_no_op and len(self._devices) == 0:
+             with tfutil.absolute_name_scope(self.scope):
+                 return tf.no_op(name='TrainingOp')
+
+         # Clean up gradients.
+         for device_idx, device in enumerate(self._devices.values()):
+             with tfutil.absolute_name_scope(self.scope + "/Clean%d" % device_idx), tf.device(device.name):
+                 for var, grad in device.grad_raw.items():
+
+                     # Filter out disconnected gradients and convert to float32.
+                     grad = [g for g in grad if g is not None]
+                     grad = [tf.cast(g, tf.float32) for g in grad]
+
+                     # Sum within the device.
+                     if len(grad) == 0:
+                         grad = tf.zeros(var.shape)  # No gradients => zero.
+                     elif len(grad) == 1:
+                         grad = grad[0]              # Single gradient => use as is.
+                     else:
+                         grad = tf.add_n(grad)       # Multiple gradients => sum.
+
+                     # Scale as needed.
+                     scale = 1.0 / len(device.grad_raw[var]) / len(self._devices)
+                     scale = tf.constant(scale, dtype=tf.float32, name="scale")
+                     if self.minibatch_multiplier is not None:
+                         scale /= tf.cast(self.minibatch_multiplier, tf.float32)
+                     scale = self.undo_loss_scaling(scale)
+                     device.grad_clean[var] = grad * scale
+
+         # Sum gradients across devices.
+         if len(self._devices) > 1:
+             with tfutil.absolute_name_scope(self.scope + "/Broadcast"), tf.device(None):
+                 for all_vars in zip(*[device.grad_clean.keys() for device in self._devices.values()]):
+                     if len(all_vars) > 0 and all(dim > 0 for dim in all_vars[0].shape.as_list()):  # NCCL does not support zero-sized tensors.
+                         all_grads = [device.grad_clean[var] for device, var in zip(self._devices.values(), all_vars)]
+                         all_grads = nccl_ops.all_sum(all_grads)
+                         for device, var, grad in zip(self._devices.values(), all_vars, all_grads):
+                             device.grad_clean[var] = grad
+
+         # Apply updates separately on each device.
+         for device_idx, device in enumerate(self._devices.values()):
+             with tfutil.absolute_name_scope(self.scope + "/Apply%d" % device_idx), tf.device(device.name):
+                 # pylint: disable=cell-var-from-loop
+
+                 # Accumulate gradients over time.
+                 if self.minibatch_multiplier is None:
+                     acc_ok = tf.constant(True, name='acc_ok')
+                     device.grad_acc = OrderedDict(device.grad_clean)
+                 else:
+                     # Create variables.
+                     with tf.control_dependencies(None):
+                         for var in device.grad_clean.keys():
+                             device.grad_acc_vars[var] = tf.Variable(tf.zeros(var.shape), trainable=False, name="grad_acc_var")
+                         device.grad_acc_count = tf.Variable(tf.zeros([]), trainable=False, name="grad_acc_count")
+
+                     # Track counter.
+                     count_cur = device.grad_acc_count + 1.0
+                     count_inc_op = lambda: tf.assign(device.grad_acc_count, count_cur)
+                     count_reset_op = lambda: tf.assign(device.grad_acc_count, tf.zeros([]))
+                     acc_ok = (count_cur >= tf.cast(self.minibatch_multiplier, tf.float32))
+                     all_ops.append(tf.cond(acc_ok, count_reset_op, count_inc_op))
+
+                     # Track gradients.
+                     for var, grad in device.grad_clean.items():
+                         acc_var = device.grad_acc_vars[var]
+                         acc_cur = acc_var + grad
+                         device.grad_acc[var] = acc_cur
+                         with tf.control_dependencies([acc_cur]):
+                             acc_inc_op = lambda: tf.assign(acc_var, acc_cur)
+                             acc_reset_op = lambda: tf.assign(acc_var, tf.zeros(var.shape))
+                             all_ops.append(tf.cond(acc_ok, acc_reset_op, acc_inc_op))
+
+                 # No overflow => apply gradients.
+                 all_ok = tf.reduce_all(tf.stack([acc_ok] + [tf.reduce_all(tf.is_finite(g)) for g in device.grad_acc.values()]))
+                 apply_op = lambda: device.optimizer.apply_gradients([(tf.cast(grad, var.dtype), var) for var, grad in device.grad_acc.items()])
+                 all_ops.append(tf.cond(all_ok, apply_op, tf.no_op))
+
+                 # Adjust loss scaling.
+                 if self.use_loss_scaling:
+                     ls_inc_op = lambda: tf.assign_add(device.loss_scaling_var, self.loss_scaling_inc)
+                     ls_dec_op = lambda: tf.assign_sub(device.loss_scaling_var, self.loss_scaling_dec)
+                     ls_update_op = lambda: tf.group(tf.cond(all_ok, ls_inc_op, ls_dec_op))
+                     all_ops.append(tf.cond(acc_ok, ls_update_op, tf.no_op))
+
+                 # Last device => report statistics.
+                 if device_idx == len(self._devices) - 1:
+                     all_ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate))
+                     all_ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(all_ok, 0, 1), condition=acc_ok))
+                     if self.use_loss_scaling:
+                         all_ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", device.loss_scaling_var))
+
+         # Initialize variables.
+         self.reset_optimizer_state()
+         if self.use_loss_scaling:
+             tfutil.init_uninitialized_vars([device.loss_scaling_var for device in self._devices.values()])
+         if self.minibatch_multiplier is not None:
+             tfutil.run([var.initializer for device in self._devices.values() for var in list(device.grad_acc_vars.values()) + [device.grad_acc_count]])
+
+         # Group everything into a single op.
+         with tfutil.absolute_name_scope(self.scope):
+             return tf.group(*all_ops, name="TrainingOp")
+
+     def reset_optimizer_state(self) -> None:
+         """Reset internal state of the underlying optimizer."""
+         tfutil.assert_tf_initialized()
+         tfutil.run([var.initializer for device in self._devices.values() for var in device.optimizer.variables()])
+
+     def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
+         """Get or create variable representing log2 of the current dynamic loss scaling factor."""
+         return self._get_device(device).loss_scaling_var
+
+     def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
+         """Apply dynamic loss scaling for the given expression."""
+         assert tfutil.is_tf_expression(value)
+         if not self.use_loss_scaling:
+             return value
+         return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
+
+     def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
+         """Undo the effect of dynamic loss scaling for the given expression."""
+         assert tfutil.is_tf_expression(value)
+         if not self.use_loss_scaling:
+             return value
+         return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type
+
+
+ class SimpleAdam:
+     """Simplified version of tf.train.AdamOptimizer that behaves identically when used with dnnlib.tflib.Optimizer."""
+
+     def __init__(self, name="Adam", learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
+         self.name = name
+         self.learning_rate = learning_rate
+         self.beta1 = beta1
+         self.beta2 = beta2
+         self.epsilon = epsilon
+         self.all_state_vars = []
+
+     def variables(self):
+         return self.all_state_vars
+
+     def compute_gradients(self, loss, var_list, gate_gradients=tf.train.Optimizer.GATE_NONE):
+         assert gate_gradients == tf.train.Optimizer.GATE_NONE
+         return list(zip(tf.gradients(loss, var_list), var_list))
+
+     def apply_gradients(self, grads_and_vars):
+         with tf.name_scope(self.name):
+             state_vars = []
+             update_ops = []
+
+             # Adjust learning rate to deal with startup bias.
+             with tf.control_dependencies(None):
+                 b1pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False)
+                 b2pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False)
+             state_vars += [b1pow_var, b2pow_var]
+             b1pow_new = b1pow_var * self.beta1
+             b2pow_new = b2pow_var * self.beta2
+             update_ops += [tf.assign(b1pow_var, b1pow_new), tf.assign(b2pow_var, b2pow_new)]
+             lr_new = self.learning_rate * tf.sqrt(1 - b2pow_new) / (1 - b1pow_new)
+
+             # Construct ops to update each variable.
+             for grad, var in grads_and_vars:
+                 with tf.control_dependencies(None):
+                     m_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
+                     v_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
+                 state_vars += [m_var, v_var]
+                 m_new = self.beta1 * m_var + (1 - self.beta1) * grad
+                 v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad)
+                 var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon)
+                 update_ops += [tf.assign(m_var, m_new), tf.assign(v_var, v_new), tf.assign_sub(var, var_delta)]
+
+             # Group everything together.
+             self.all_state_vars += state_vars
+             return tf.group(*update_ops)
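A hedged end-to-end sketch of the `Optimizer` wrapper above (illustrative, not part of the commit). It assumes TensorFlow 1.x on a single device; the variable `w` and the name `TrainToy` are made up for the example:

```python
import tensorflow as tf
from stylegan_human.dnnlib.tflib import tfutil
from stylegan_human.dnnlib.tflib.optimizer import Optimizer

tfutil.init_tf()  # the wrapper asserts that a default session exists

# A toy least-squares problem (hypothetical example).
w = tf.Variable(tf.zeros([4]), name='w')
loss = tf.reduce_sum(tf.square(w - 1.0))

opt = Optimizer(name='TrainToy', learning_rate=0.1)
opt.register_gradients(loss, [w])  # called once per GPU in multi-GPU setups
train_op = opt.apply_updates()     # builds the fused clean/accumulate/apply op

tfutil.init_uninitialized_vars()   # covers w plus the wrapper's internal vars
for _ in range(200):
    tfutil.run(train_op)
print(tfutil.run(w))  # converges toward [1. 1. 1. 1.]
```

With `minibatch_multiplier=N`, the same `train_op` would instead accumulate gradients into `grad_acc_vars` and only apply them (and reset the counter) every N-th run, which is how arbitrarily large effective minibatches are handled.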
stylegan_human/dnnlib/tflib/tfutil.py ADDED
@@ -0,0 +1,254 @@
+ # Copyright (c) SenseTime Research. All rights reserved.
+
+ # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+ #
+ # This work is made available under the Nvidia Source Code License-NC.
+ # To view a copy of this license, visit
+ # https://nvlabs.github.io/stylegan2/license.html
+
+ """Miscellaneous helper utils for Tensorflow."""
+
+ import os
+ import numpy as np
+ import tensorflow as tf
+
+ # Silence deprecation warnings from TensorFlow 1.13 onwards
+ import logging
+ logging.getLogger('tensorflow').setLevel(logging.ERROR)
+ import tensorflow.contrib  # requires TensorFlow 1.x!
+ tf.contrib = tensorflow.contrib
+
+ from typing import Any, Iterable, List, Union
+
+ TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
+ """A type that represents a valid Tensorflow expression."""
+
+ TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
+ """A type that can be converted to a valid Tensorflow expression."""
+
+
+ def run(*args, **kwargs) -> Any:
+     """Run the specified ops in the default session."""
+     assert_tf_initialized()
+     return tf.get_default_session().run(*args, **kwargs)
+
+
+ def is_tf_expression(x: Any) -> bool:
+     """Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
+     return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
+
+
+ def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
+     """Convert a Tensorflow shape to a list of ints. Retained for backwards compatibility -- use TensorShape.as_list() in new code."""
+     return [dim.value for dim in shape]
+
+
+ def flatten(x: TfExpressionEx) -> TfExpression:
+     """Shortcut function for flattening a tensor."""
+     with tf.name_scope("Flatten"):
+         return tf.reshape(x, [-1])
+
+
+ def log2(x: TfExpressionEx) -> TfExpression:
+     """Logarithm in base 2."""
+     with tf.name_scope("Log2"):
+         return tf.log(x) * np.float32(1.0 / np.log(2.0))
+
+
+ def exp2(x: TfExpressionEx) -> TfExpression:
+     """Exponent in base 2."""
+     with tf.name_scope("Exp2"):
+         return tf.exp(x * np.float32(np.log(2.0)))
+
+
+ def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
+     """Linear interpolation."""
+     with tf.name_scope("Lerp"):
+         return a + (b - a) * t
+
+
+ def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
+     """Linear interpolation with clip."""
+     with tf.name_scope("LerpClip"):
+         return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
+
+
+ def absolute_name_scope(scope: str) -> tf.name_scope:
+     """Forcefully enter the specified name scope, ignoring any surrounding scopes."""
+     return tf.name_scope(scope + "/")
+
+
+ def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
+     """Forcefully enter the specified variable scope, ignoring any surrounding scopes."""
+     return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False)
+
+
+ def _sanitize_tf_config(config_dict: dict = None) -> dict:
+     # Defaults.
+     cfg = dict()
+     cfg["rnd.np_random_seed"]               = None   # Random seed for NumPy. None = keep as is.
+     cfg["rnd.tf_random_seed"]               = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is.
+     cfg["env.TF_CPP_MIN_LOG_LEVEL"]         = "1"    # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
+     cfg["graph_options.place_pruned_graph"] = True   # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
+     cfg["gpu_options.allow_growth"]         = True   # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
+
+     # Remove defaults for environment variables that are already set.
+     for key in list(cfg):
+         fields = key.split(".")
+         if fields[0] == "env":
+             assert len(fields) == 2
+             if fields[1] in os.environ:
+                 del cfg[key]
+
+     # User overrides.
+     if config_dict is not None:
+         cfg.update(config_dict)
+     return cfg
+
+
+ def init_tf(config_dict: dict = None) -> None:
+     """Initialize TensorFlow session using good default settings."""
+     # Skip if already initialized.
+     if tf.get_default_session() is not None:
+         return
+
+     # Setup config dict and random seeds.
+     cfg = _sanitize_tf_config(config_dict)
+     np_random_seed = cfg["rnd.np_random_seed"]
+     if np_random_seed is not None:
+         np.random.seed(np_random_seed)
+     tf_random_seed = cfg["rnd.tf_random_seed"]
+     if tf_random_seed == "auto":
+         tf_random_seed = np.random.randint(1 << 31)
+     if tf_random_seed is not None:
+         tf.set_random_seed(tf_random_seed)
+
+     # Setup environment variables.
+     for key, value in cfg.items():
+         fields = key.split(".")
+         if fields[0] == "env":
+             assert len(fields) == 2
+             os.environ[fields[1]] = str(value)
+
+     # Create default TensorFlow session.
+     create_session(cfg, force_as_default=True)
+
+
+ def assert_tf_initialized():
+     """Check that TensorFlow session has been initialized."""
+     if tf.get_default_session() is None:
+         raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().")
+
+
+ def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
+     """Create tf.Session based on config dict."""
+     # Setup TensorFlow config proto.
+     cfg = _sanitize_tf_config(config_dict)
+     config_proto = tf.ConfigProto()
+     for key, value in cfg.items():
+         fields = key.split(".")
+         if fields[0] not in ["rnd", "env"]:
+             obj = config_proto
+             for field in fields[:-1]:
+                 obj = getattr(obj, field)
+             setattr(obj, fields[-1], value)
+
+     # Create session.
+     session = tf.Session(config=config_proto)
+     if force_as_default:
+         # pylint: disable=protected-access
+         session._default_session = session.as_default()
+         session._default_session.enforce_nesting = False
+         session._default_session.__enter__()
+     return session
+
+
+ def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
+     """Initialize all tf.Variables that have not already been initialized.
+
+     Equivalent to the following, but more efficient and does not bloat the tf graph:
+     tf.variables_initializer(tf.report_uninitialized_variables()).run()
+     """
+     assert_tf_initialized()
+     if target_vars is None:
+         target_vars = tf.global_variables()
+
+     test_vars = []
+     test_ops = []
+
+     with tf.control_dependencies(None):  # ignore surrounding control_dependencies
+         for var in target_vars:
+             assert is_tf_expression(var)
+
+             try:
+                 tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
+             except KeyError:
+                 # Op does not exist => variable may be uninitialized.
+                 test_vars.append(var)
+
+                 with absolute_name_scope(var.name.split(":")[0]):
+                     test_ops.append(tf.is_variable_initialized(var))
+
+     init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
+     run([var.initializer for var in init_vars])
+
+
+ def set_vars(var_to_value_dict: dict) -> None:
+     """Set the values of given tf.Variables.
+
+     Equivalent to the following, but more efficient and does not bloat the tf graph:
+     tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
+     """
+     assert_tf_initialized()
+     ops = []
+     feed_dict = {}
+
+     for var, value in var_to_value_dict.items():
+         assert is_tf_expression(var)
+
+         try:
+             setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0"))  # look for existing op
+         except KeyError:
+             with absolute_name_scope(var.name.split(":")[0]):
+                 with tf.control_dependencies(None):  # ignore surrounding control_dependencies
+                     setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter")  # create new setter
+
+         ops.append(setter)
+         feed_dict[setter.op.inputs[1]] = value
+
+     run(ops, feed_dict)
+
+
+ def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
+     """Create tf.Variable with large initial value without bloating the tf graph."""
+     assert_tf_initialized()
+     assert isinstance(initial_value, np.ndarray)
+     zeros = tf.zeros(initial_value.shape, initial_value.dtype)
+     var = tf.Variable(zeros, *args, **kwargs)
+     set_vars({var: initial_value})
+     return var
+
+
+ def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):
+     """Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
+     Can be used as an input transformation for Network.run().
+     """
+     images = tf.cast(images, tf.float32)
+     if nhwc_to_nchw:
+         images = tf.transpose(images, [0, 3, 1, 2])
+     return images * ((drange[1] - drange[0]) / 255) + drange[0]
+
+
+ def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1):
+     """Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
+     Can be used as an output transformation for Network.run().
+     """
+     images = tf.cast(images, tf.float32)
+     if shrink > 1:
+         ksize = [1, 1, shrink, shrink]
+         images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
+     if nchw_to_nhwc:
+         images = tf.transpose(images, [0, 2, 3, 1])
+     scale = 255 / (drange[1] - drange[0])
+     images = images * scale + (0.5 - drange[0] * scale)
+     return tf.saturate_cast(images, tf.uint8)
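The helpers above are typically used in this order: create the session, initialize variables, then read/write them. A small hedged sketch (illustrative, not part of the commit; assumes TensorFlow 1.x and the variable name `toy_var` is made up):

```python
import numpy as np
import tensorflow as tf
from stylegan_human.dnnlib.tflib import tfutil

# Create the default session; dict keys map onto ConfigProto fields the same
# way _sanitize_tf_config() resolves them.
tfutil.init_tf({'gpu_options.allow_growth': True})

v = tf.Variable(tf.zeros([2, 2]), name='toy_var')
tfutil.init_uninitialized_vars([v])

# set_vars() reuses a cached '<var>/setter' assign op plus a placeholder,
# so repeated calls do not grow the graph.
tfutil.set_vars({v: np.full([2, 2], 7.0, dtype=np.float32)})
print(tfutil.run(v))  # [[7. 7.] [7. 7.]]
```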
stylegan_human/dnnlib/util.py ADDED
@@ -0,0 +1,479 @@
1
+ ο»Ώ# Copyright (c) SenseTime Research. All rights reserved.
2
+ # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
5
+ # and proprietary rights in and to this software, related documentation
6
+ # and any modifications thereto. Any use, reproduction, disclosure or
7
+ # distribution of this software and related documentation without an express
8
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
9
+
10
+ """Miscellaneous utility classes and functions."""
11
+
12
+ import ctypes
13
+ import fnmatch
14
+ import importlib
15
+ import inspect
16
+ import numpy as np
17
+ import os
18
+ import shutil
19
+ import sys
20
+ import types
21
+ import io
22
+ import pickle
23
+ import re
24
+ import requests
25
+ import html
26
+ import hashlib
27
+ import glob
28
+ import tempfile
29
+ import urllib
30
+ import urllib.request
31
+ import uuid
32
+
33
+ from distutils.util import strtobool
34
+ from typing import Any, List, Tuple, Union
35
+
36
+
37
+ # Util classes
38
+ # ------------------------------------------------------------------------------------------
39
+
40
+
41
+ class EasyDict(dict):
42
+ """Convenience class that behaves like a dict but allows access with the attribute syntax."""
43
+
44
+ def __getattr__(self, name: str) -> Any:
45
+ try:
46
+ return self[name]
47
+ except KeyError:
48
+ raise AttributeError(name)
49
+
50
+ def __setattr__(self, name: str, value: Any) -> None:
51
+ self[name] = value
52
+
53
+ def __delattr__(self, name: str) -> None:
54
+ del self[name]
55
+
56
+
57
+ class Logger(object):
58
+ """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
59
+
60
+ def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
61
+ self.file = None
62
+
63
+ if file_name is not None:
64
+ self.file = open(file_name, file_mode)
65
+
66
+ self.should_flush = should_flush
67
+ self.stdout = sys.stdout
68
+ self.stderr = sys.stderr
69
+
70
+ sys.stdout = self
71
+ sys.stderr = self
72
+
73
+ def __enter__(self) -> "Logger":
74
+ return self
75
+
76
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
77
+ self.close()
78
+
79
+ def write(self, text: Union[str, bytes]) -> None:
80
+ """Write text to stdout (and a file) and optionally flush."""
81
+ if isinstance(text, bytes):
82
+ text = text.decode()
83
+ if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
84
+ return
85
+
86
+ if self.file is not None:
87
+ self.file.write(text)
88
+
89
+ self.stdout.write(text)
90
+
91
+ if self.should_flush:
92
+ self.flush()
93
+
94
+ def flush(self) -> None:
95
+ """Flush written text to both stdout and a file, if open."""
96
+ if self.file is not None:
97
+ self.file.flush()
98
+
99
+ self.stdout.flush()
100
+
101
+ def close(self) -> None:
102
+ """Flush, close possible files, and remove stdout/stderr mirroring."""
103
+ self.flush()
104
+
105
+ # if using multiple loggers, prevent closing in wrong order
106
+ if sys.stdout is self:
107
+ sys.stdout = self.stdout
108
+ if sys.stderr is self:
109
+ sys.stderr = self.stderr
110
+
111
+ if self.file is not None:
112
+ self.file.close()
113
+ self.file = None
114
+
115
+
116
+ # Cache directories
117
+ # ------------------------------------------------------------------------------------------
118
+
119
+ _dnnlib_cache_dir = None
120
+
121
+ def set_cache_dir(path: str) -> None:
122
+ global _dnnlib_cache_dir
123
+ _dnnlib_cache_dir = path
124
+
125
+ def make_cache_dir_path(*paths: str) -> str:
126
+ if _dnnlib_cache_dir is not None:
127
+ return os.path.join(_dnnlib_cache_dir, *paths)
128
+ if 'DNNLIB_CACHE_DIR' in os.environ:
129
+ return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
130
+ if 'HOME' in os.environ:
131
+ return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
132
+ if 'USERPROFILE' in os.environ:
133
+ return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
134
+ return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
135
+
136
+ # Small util functions
137
+ # ------------------------------------------------------------------------------------------
138
+
139
+
140
+ def format_time(seconds: Union[int, float]) -> str:
141
+ """Convert the seconds to human readable string with days, hours, minutes and seconds."""
142
+ s = int(np.rint(seconds))
143
+
144
+ if s < 60:
145
+ return "{0}s".format(s)
146
+ elif s < 60 * 60:
147
+ return "{0}m {1:02}s".format(s // 60, s % 60)
148
+ elif s < 24 * 60 * 60:
149
+ return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
150
+ else:
151
+ return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
152
+
153
+
154
+ def ask_yes_no(question: str) -> bool:
155
+ """Ask the user the question until the user inputs a valid answer."""
156
+ while True:
157
+ try:
158
+ print("{0} [y/n]".format(question))
159
+ return strtobool(input().lower())
160
+ except ValueError:
161
+ pass
162
+
163
+
164
+ def tuple_product(t: Tuple) -> Any:
165
+ """Calculate the product of the tuple elements."""
166
+ result = 1
167
+
168
+ for v in t:
169
+ result *= v
170
+
171
+ return result
172
+
173
+
174
+ _str_to_ctype = {
175
+ "uint8": ctypes.c_ubyte,
176
+ "uint16": ctypes.c_uint16,
177
+ "uint32": ctypes.c_uint32,
178
+ "uint64": ctypes.c_uint64,
179
+ "int8": ctypes.c_byte,
180
+ "int16": ctypes.c_int16,
181
+ "int32": ctypes.c_int32,
182
+ "int64": ctypes.c_int64,
183
+ "float32": ctypes.c_float,
184
+ "float64": ctypes.c_double
185
+ }
186
+
187
+
188
+ def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
189
+ """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
190
+ type_str = None
191
+
192
+ if isinstance(type_obj, str):
193
+ type_str = type_obj
194
+ elif hasattr(type_obj, "__name__"):
195
+ type_str = type_obj.__name__
196
+ elif hasattr(type_obj, "name"):
197
+ type_str = type_obj.name
198
+ else:
199
+ raise RuntimeError("Cannot infer type name from input")
200
+
201
+ assert type_str in _str_to_ctype.keys()
202
+
203
+ my_dtype = np.dtype(type_str)
204
+ my_ctype = _str_to_ctype[type_str]
205
+
206
+ assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
207
+
208
+ return my_dtype, my_ctype
209
+
210
+
211
+ def is_pickleable(obj: Any) -> bool:
212
+ try:
213
+ with io.BytesIO() as stream:
214
+ pickle.dump(obj, stream)
215
+ return True
216
+ except:
217
+ return False
218
+
219
+
220
+ # Functionality to import modules/objects by name, and call functions by name
221
+ # ------------------------------------------------------------------------------------------
222
+
223
+ def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
224
+ """Searches for the underlying module behind the name to some python object.
225
+ Returns the module and the object name (original name with module part removed)."""
226
+
227
+ # allow convenience shorthands, substitute them by full names
228
+ obj_name = re.sub("^np.", "numpy.", obj_name)
229
+ obj_name = re.sub("^tf.", "tensorflow.", obj_name)
230
+
231
+ # list alternatives for (module_name, local_obj_name)
232
+ parts = obj_name.split(".")
233
+ name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
234
+
235
+ # try each alternative in turn
236
+ for module_name, local_obj_name in name_pairs:
237
+ try:
238
+ module = importlib.import_module(module_name) # may raise ImportError
239
+ get_obj_from_module(module, local_obj_name) # may raise AttributeError
240
+ return module, local_obj_name
241
+ except:
242
+ pass
243
+
244
+ # maybe some of the modules themselves contain errors?
245
+ for module_name, _local_obj_name in name_pairs:
246
+ try:
247
+ importlib.import_module(module_name) # may raise ImportError
248
+ except ImportError:
249
+ if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
250
+ raise
251
+
252
+ # maybe the requested attribute is missing?
253
+ for module_name, local_obj_name in name_pairs:
254
+ try:
255
+ module = importlib.import_module(module_name) # may raise ImportError
256
+ get_obj_from_module(module, local_obj_name) # may raise AttributeError
257
+ except ImportError:
258
+ pass
259
+
260
+ # we are out of luck, but we have no idea why
261
+ raise ImportError(obj_name)
262
+
263
+
264
+ def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
265
+ """Traverses the object name and returns the last (rightmost) python object."""
266
+ if obj_name == '':
267
+ return module
268
+ obj = module
269
+ for part in obj_name.split("."):
270
+ obj = getattr(obj, part)
271
+ return obj
272
+
273
+
274
+ def get_obj_by_name(name: str) -> Any:
275
+ """Finds the python object with the given name."""
276
+ module, obj_name = get_module_from_obj_name(name)
277
+ return get_obj_from_module(module, obj_name)
278
+
279
+
280
+ def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
281
+ """Finds the python object with the given name and calls it as a function."""
282
+ assert func_name is not None
283
+ # print('func_name: ', func_name) #'training.dataset.ImageFolderDataset'
284
+ func_obj = get_obj_by_name(func_name)
285
+ assert callable(func_obj)
286
+ return func_obj(*args, **kwargs)
287
+
288
+
289
+ def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
290
+ """Finds the python class with the given name and constructs it with the given arguments."""
291
+ return call_func_by_name(*args, func_name=class_name, **kwargs)
292
+
293
+
294
+ def get_module_dir_by_obj_name(obj_name: str) -> str:
295
+ """Get the directory path of the module containing the given object name."""
296
+ module, _ = get_module_from_obj_name(obj_name)
297
+ return os.path.dirname(inspect.getfile(module))
298
+
299
+
300
+ def is_top_level_function(obj: Any) -> bool:
301
+ """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
302
+ return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
303
+
304
+
305
+ def get_top_level_function_name(obj: Any) -> str:
306
+ """Return the fully-qualified name of a top-level function."""
307
+ assert is_top_level_function(obj)
308
+ module = obj.__module__
309
+ if module == '__main__':
310
+ module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
311
+ return module + "." + obj.__name__
312
+
313
+
314
+ # File system helpers
315
+ # ------------------------------------------------------------------------------------------
316
+
317
+ def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
318
+ """List all files recursively in a given directory while ignoring given file and directory names.
319
+ Returns list of tuples containing both absolute and relative paths."""
320
+ assert os.path.isdir(dir_path)
321
+ base_name = os.path.basename(os.path.normpath(dir_path))
322
+
323
+ if ignores is None:
324
+ ignores = []
325
+
326
+ result = []
327
+
328
+ for root, dirs, files in os.walk(dir_path, topdown=True):
329
+ for ignore_ in ignores:
330
+ dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
331
+
332
+ # dirs need to be edited in-place
333
+ for d in dirs_to_remove:
334
+ dirs.remove(d)
335
+
336
+ files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
337
+
338
+ absolute_paths = [os.path.join(root, f) for f in files]
339
+ relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
340
+
341
+ if add_base_to_relative:
342
+ relative_paths = [os.path.join(base_name, p) for p in relative_paths]
343
+
344
+ assert len(absolute_paths) == len(relative_paths)
345
+ result += zip(absolute_paths, relative_paths)
346
+
347
+ return result
348
+
349
+
350
+ def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
351
+ """Takes in a list of tuples of (src, dst) paths and copies files.
352
+ Will create all necessary directories."""
353
+ for file in files:
354
+ target_dir_name = os.path.dirname(file[1])
355
+
356
+ # will create all intermediate-level directories
357
+ if not os.path.exists(target_dir_name):
358
+ os.makedirs(target_dir_name)
359
+
360
+ shutil.copyfile(file[0], file[1])
361
+
362
+
363
+ # URL helpers
364
+ # ------------------------------------------------------------------------------------------
365
+
366
+ def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
367
+ """Determine whether the given object is a valid URL string."""
368
+ if not isinstance(obj, str) or not "://" in obj:
369
+ return False
370
+ if allow_file_urls and obj.startswith('file://'):
371
+ return True
372
+ try:
373
+ res = requests.compat.urlparse(obj)
374
+ if not res.scheme or not res.netloc or not "." in res.netloc:
375
+ return False
376
+ res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
377
+ if not res.scheme or not res.netloc or not "." in res.netloc:
378
+ return False
379
+ except Exception:
380
+ return False
381
+ return True
382
+
383
+
384
+ def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
385
+ """Download the given URL and return a binary-mode file object to access the data."""
386
+ assert num_attempts >= 1
387
+ assert not (return_filename and (not cache))
388
+
389
+ # Doesn't look like an URL scheme so interpret it as a local filename.
390
+ if not re.match('^[a-z]+://', url):
391
+ return url if return_filename else open(url, "rb")
392
+
393
+ # Handle file URLs. This code handles unusual file:// patterns that
394
+ # arise on Windows:
395
+ #
396
+ # file:///c:/foo.txt
397
+ #
398
+ # which would translate to a local '/c:/foo.txt' filename that's
399
+ # invalid. Drop the forward slash for such pathnames.
400
+ #
401
+ # If you touch this code path, you should test it on both Linux and
402
+ # Windows.
403
+ #
404
+ # Some internet resources suggest using urllib.request.url2pathname(),
405
+ # but that converts forward slashes to backslashes and this causes
406
+ # its own set of problems.
407
+ if url.startswith('file://'):
408
+ filename = urllib.parse.urlparse(url).path
409
+ if re.match(r'^/[a-zA-Z]:', filename):
410
+ filename = filename[1:]
411
+ return filename if return_filename else open(filename, "rb")
412
+
413
+ assert is_url(url)
414
+
415
+ # Lookup from cache.
416
+ if cache_dir is None:
417
+ cache_dir = make_cache_dir_path('downloads')
418
+
419
+ url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
420
+ if cache:
421
+ cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
422
+ if len(cache_files) == 1:
423
+ filename = cache_files[0]
424
+ return filename if return_filename else open(filename, "rb")
425
+
426
+ # Download.
427
+ url_name = None
428
+ url_data = None
429
+ with requests.Session() as session:
430
+ if verbose:
431
+ print("Downloading %s ..." % url, end="", flush=True)
432
+ for attempts_left in reversed(range(num_attempts)):
433
+ try:
434
+ with session.get(url) as res:
435
+ res.raise_for_status()
436
+ if len(res.content) == 0:
437
+ raise IOError("No data received")
438
+
439
+ if len(res.content) < 8192:
440
+ content_str = res.content.decode("utf-8")
441
+ if "download_warning" in res.headers.get("Set-Cookie", ""):
442
+ links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
443
+ if len(links) == 1:
444
+ url = requests.compat.urljoin(url, links[0])
445
+ raise IOError("Google Drive virus checker nag")
446
+ if "Google Drive - Quota exceeded" in content_str:
447
+ raise IOError("Google Drive download quota exceeded -- please try again later")
448
+
449
+ match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
450
+ url_name = match[1] if match else url
451
+ url_data = res.content
452
+ if verbose:
453
+ print(" done")
454
+ break
455
+ except KeyboardInterrupt:
456
+ raise
457
+ except:
458
+ if not attempts_left:
459
+ if verbose:
460
+ print(" failed")
461
+ raise
462
+ if verbose:
463
+ print(".", end="", flush=True)
464
+
465
+ # Save to cache.
466
+ if cache:
467
+ safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
468
+ cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
469
+ temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
470
+ os.makedirs(cache_dir, exist_ok=True)
471
+ with open(temp_file, "wb") as f:
472
+ f.write(url_data)
473
+ os.replace(temp_file, cache_file) # atomic
474
+ if return_filename:
475
+ return cache_file
476
+
477
+ # Return data as file object.
478
+ assert not return_filename
479
+ return io.BytesIO(url_data)
stylegan_human/docs/Dataset.md ADDED
@@ -0,0 +1,74 @@
1
+ # SHHQ Dataset
2
+ <img src="../img/preview_samples1.png" width="96%" height="96%">
3
+
4
+ ## Overview
5
+ SHHQ is a dataset of high-quality full-body human images at a resolution of 1024 Γ— 512.
6
+ Since we must follow a rigorous legal review process at our institute, we cannot release all of the data at once.
7
+
8
+ For now, SHHQ-1.0 with 40K images is released! More data will follow in later versions.
9
+
10
+
11
+ ## Data Sources
12
+ Images were collected in two main ways:
13
+ 1) From the Internet.
14
+ We developed a crawler tool that uses official APIs, mainly downloading images from Flickr, Pixabay, and Pexels. You therefore need to comply with all of the following licenses when using the dataset: CC0, the [Pixabay License](https://pixabay.com/service/license/), and the [Pexels License](https://www.pexels.com/license/).
15
+ 2) From data providers.
16
+ We purchased images from the databases of individual photographers, modeling agencies, and other suppliers.
17
+ Images were reviewed by our legal team prior to purchase to ensure permission for use in research.
18
+
19
+ ### Note:
20
+ The composition of SHHQ-1.0:
21
+
22
+ 1) Images obtained from the above sources.
23
+ 2) 9,991 processed DeepFashion [[1]](#1) images (retaining only full-body images).
23
+ 3) 1,940 African-fashion images from the InFashAI [[2]](#2) dataset, included to increase data diversity.
25
+
26
+ ## Data License
27
+ We are aware of privacy concerns and take licensing and privacy issues seriously. All released data is provided under the CC0 license and is free for research use. In addition, persons in the dataset are anonymized, with no additional private or sensitive metadata attached.
28
+
29
+ ## Agreement
30
+ The SHHQ dataset is available for non-commercial research purposes only.
31
+
32
+ You agree not to reproduce, duplicate, copy, sell, trade, resell, or exploit any portion of the images or any portion of the derived data for commercial purposes.
33
+
34
+ You agree NOT to further copy, publish, or distribute any portion of SHHQ to any third party for any purpose; as the sole exception, copies of the dataset may be made for internal use at a single site within the same organization.
35
+
36
+ Shanghai AI Lab reserves the right to terminate your access to the SHHQ at any time.
37
+
38
+ ## Dataset Preview
39
+ For those interested in our dataset, we provide a preview version with 100 images randomly sampled from SHHQ-1.0: [SHHQ-1.0_samples](https://drive.google.com/file/d/1tnNFfmFtzRbYL3qEnNXQ_ShaN9YV5tI5/view?usp=sharing).
40
+
41
+ In SHHQ-1.0, we provide aligned raw images along with machine-calculated segmentation masks. We also plan to release a manually annotated human-parsing version of these 40,000 images later. Please stay tuned.
42
+
43
+ > We also provide the script [bg_white.py](../bg_white.py) to whiten the background of a raw image using its segmentation mask, as sketched below.
44
+
45
+ If you want to access the full SHHQ-1.0, please read the following instructions.
46
+
47
+ ## Models trained using SHHQ-1.0
48
+
49
+ | Structure | 1024Γ—512 model | Metric | Score | 512Γ—256 model | Metric | Score |
50
+ | --------- |:----------:| :----------:| :----------:| :-----: | :-----: | :-----: |
51
+ | StyleGAN1 | to be released | - | - | to be released | - | - |
52
+ | StyleGAN2 | [SHHQ-1.0_sg2_1024.pkl](https://drive.google.com/file/d/1PuvE72xpc69Zq4y58dohuKbG9dFnnjEX/view?usp=sharing) | fid50k_full | 3.56 | [SHHQ-1.0_sg2_512.pkl](https://drive.google.com/file/d/170t2FRWxR8_TG3_y0nVtDBogLPOClnyf/view?usp=sharing) | fid50k_full | 3.68 |
53
+ | StyleGAN3 | to be released | - | - |to be released | - | - |
54
+
55
+
56
+ ## Download Instructions
57
+ Please download the SHHQ Dataset Release Agreement from [link](./SHHQ_Dataset_Release_Agreement.pdf).
58
+ Read it carefully, then complete and sign it appropriately.
59
+
60
+ Please send the completed form to Jianglin Fu ([email protected]) and Shikai Li ([email protected]), and cc Wayne Wu ([email protected]), using an institutional email address. Use the subject line "SHHQ Dataset Release Agreement". We will verify your request and contact you with the dataset link and the password to unzip the image data.
61
+
62
+ Note:
63
+
64
+ 1. We are currently receiving a large number of applications and need to verify each applicant carefully. Please be patient; we will reply to you as soon as possible.
65
+
66
+ 2. The signature in the agreement should be hand-written.
67
+
68
+ ## References
69
+ <a id="1">[1]</a>
70
+ Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou. DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations. CVPR (2016)
71
+
72
+ <a id="2">[2]</a>
73
+ Hacheme, Gilles and Sayouti, Noureini. Neural fashion image captioning: Accounting for data diversity. arXiv preprint arXiv:2106.12154 (2021)
74
+
stylegan_human/docs/SHHQ_Dataset_Release_Agreement.pdf ADDED
Binary file (107 kB).
 
stylegan_human/edit.py ADDED
@@ -0,0 +1,194 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ import os
4
+ import sys
5
+ import torch
6
+ import numpy as np
7
+ sys.path.append(".")
8
+ from torch_utils.models import Generator
9
+ import click
10
+ import cv2
11
+ from typing import List, Optional
12
+ import subprocess
13
+ import legacy
14
+ from edit.edit_helper import conv_warper, decoder, encoder_ifg, encoder_ss, encoder_sefa
15
+
16
+
17
+ """
18
+ Edit generated images with different SOTA methods.
19
+ Notes:
20
+ 1. We provide some latent directions in the folder; feel free to play around with them.
21
+ 2. 'upper_length' and 'bottom_length' are the available values of 'attr_name' in this demo.
22
+ 3. Layers to control and editing strength are set in edit/edit_config.py.
23
+
24
+ Examples:
25
+
26
+ \b
27
+ # Editing with InterfaceGAN, StyleSpace, and Sefa
28
+ python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\
29
+ --seeds 61531,61570,61571,61610 --outdir outputs/edit_results
30
+
31
+
32
+ # Editing using inverted latent code
33
+ python edit.py --network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\
34
+ --outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png
35
+
36
+ """
37
+
38
+
39
+
40
+ @click.command()
41
+ @click.pass_context
42
+ @click.option('--network', 'ckpt_path', help='Network pickle filename', required=True)
43
+ @click.option('--attr_name', help='choose one of the attributes: upper_length or bottom_length', type=str, required=True)
44
+ @click.option('--trunc', 'truncation', type=float, help='Truncation psi', default=0.8, show_default=True)
45
+ @click.option('--gen_video', type=bool, default=True, help='Whether to generate a video')
46
+ @click.option('--combine', type=bool, default=True, help='Whether to combine the different editing results in the same frame')
47
+ @click.option('--seeds', type=legacy.num_range, help='List of random seeds')
48
+ @click.option('--outdir', help='Where to save the output images', type=str, required=True, default='outputs/editing', metavar='DIR')
49
+ @click.option('--real', type=bool, help='True for editing real image', default=False)
50
+ @click.option('--real_w_path', help='Path of latent code for real image')
51
+ @click.option('--real_img_path', help='Path of the real image; it is simply concatenated with the inverted and edited results')
52
+
53
+
54
+
55
+ def main(
56
+ ctx: click.Context,
57
+ ckpt_path: str,
58
+ attr_name: str,
59
+ truncation: float,
60
+ gen_video: bool,
61
+ combine: bool,
62
+ seeds: Optional[List[int]],
63
+ outdir: str,
64
+ real: str,
65
+ real_w_path: str,
66
+ real_img_path: str
67
+ ):
68
+ ## convert pkl to pth
69
+ # if not os.path.exists(ckpt_path.replace('.pkl','.pth')):
70
+ legacy.convert(ckpt_path, ckpt_path.replace('.pkl','.pth'), G_only=real)
71
+ ckpt_path = ckpt_path.replace('.pkl','.pth')
72
+ print("start...", flush=True)
73
+ config = {"latent" : 512, "n_mlp" : 8, "channel_multiplier": 2}
74
+ generator = Generator(
75
+ size = 1024,
76
+ style_dim=config["latent"],
77
+ n_mlp=config["n_mlp"],
78
+ channel_multiplier=config["channel_multiplier"]
79
+ )
80
+
81
+ generator.load_state_dict(torch.load(ckpt_path)['g_ema'])
82
+ generator.eval().cuda()
83
+
84
+ with torch.no_grad():
85
+ mean_path = os.path.join('edit','mean_latent.pkl')
86
+ if not os.path.exists(mean_path):
87
+ mean_n = 3000
88
+ mean_latent = generator.mean_latent(mean_n).detach()
89
+ legacy.save_obj(mean_latent, mean_path)
90
+ else:
91
+ mean_latent = legacy.load_pkl(mean_path).cuda()
92
+ finals = []
93
+
94
+ ## -- selected sample seeds -- ##
95
+ # seeds = [60948,60965,61174,61210,61511,61598,61610] #bottom -> long
96
+ # [60941,61064,61103,61313,61531,61570,61571] # bottom -> short
97
+ # [60941,60965,61064,61103,61174,61210,61531,61570,61571,61610] # upper --> long
98
+ # [60948,61313,61511,61598] # upper --> short
99
+ if real: seeds = [0]
100
+
101
+ for t in seeds:
102
+ if real: # now assume process single real image only
103
+ if real_img_path:
104
+ real_image = cv2.imread(real_img_path)
105
+ real_image = cv2.cvtColor(real_image, cv2.COLOR_BGR2RGB)
106
+ import torchvision.transforms as transforms
107
+ transform = transforms.Compose( # normalize to (-1, 1)
108
+ [transforms.ToTensor(),
109
+ transforms.Normalize(mean=(.5,.5,.5), std=(.5,.5,.5))]
110
+ )
111
+ real_image = transform(real_image).unsqueeze(0).cuda()
112
+
113
+ test_input = torch.load(real_w_path)
114
+ output, _ = generator(test_input, False, truncation=1,input_is_latent=True, real=True)
115
+
116
+ else: # generate image from random seeds
117
+ test_input = torch.from_numpy(np.random.RandomState(t).randn(1, 512)).float().cuda() # torch.Size([1, 512])
118
+ output, _ = generator([test_input], False, truncation=truncation, truncation_latent=mean_latent, real=real)
119
+
120
+ # interfacegan
121
+ style_space, latent, noise = encoder_ifg(generator, test_input, attr_name, truncation, mean_latent,real=real)
122
+ image1 = decoder(generator, style_space, latent, noise)
123
+ # stylespace
124
+ style_space, latent, noise = encoder_ss(generator, test_input, attr_name, truncation, mean_latent,real=real)
125
+ image2 = decoder(generator, style_space, latent, noise)
126
+ # sefa
127
+ latent, noise = encoder_sefa(generator, test_input, attr_name, truncation, mean_latent,real=real)
128
+ image3, _ = generator([latent], noise=noise, input_is_latent=True)
129
+ if real_img_path:
130
+ final = torch.cat((real_image, output, image1, image2, image3), 3)
131
+ else:
132
+ final = torch.cat((output, image1, image2, image3), 3)
133
+
134
+ # legacy.visual(output, f'{outdir}/{attr_name}_{t:05d}_raw.jpg')
135
+ # legacy.visual(image1, f'{outdir}/{attr_name}_{t:05d}_ifg.jpg')
136
+ # legacy.visual(image2, f'{outdir}/{attr_name}_{t:05d}_ss.jpg')
137
+ # legacy.visual(image3, f'{outdir}/{attr_name}_{t:05d}_sefa.jpg')
138
+
139
+ if gen_video:
140
+ total_step = 90
141
+ if real:
142
+ video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{real_w_path.split('/')[-2]}/"
143
+ video_ss_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[-2]}/"
144
+ video_sefa_path = f"{outdir}/video/sefa_{attr_name}_{real_w_path.split('/')[-2]}/"
145
+ else:
146
+ video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{t:05d}/"
147
+ video_ss_path = f"{outdir}/video/ss_{attr_name}_{t:05d}/"
148
+ video_sefa_path = f"{outdir}/video/sefa_{attr_name}_{t:05d}/"
149
+ video_comb_path = f"{outdir}/video/tmp"
150
+
151
+ if combine:
152
+ if not os.path.exists(video_comb_path):
153
+ os.makedirs(video_comb_path)
154
+ else:
155
+ if not os.path.exists(video_ifg_path):
156
+ os.makedirs(video_ifg_path)
157
+ if not os.path.exists(video_ss_path):
158
+ os.makedirs(video_ss_path)
159
+ if not os.path.exists(video_sefa_path):
160
+ os.makedirs(video_sefa_path)
161
+ for i in range(total_step):
162
+ style_space, latent, noise = encoder_ifg(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step,real=real)
163
+ image1 = decoder(generator, style_space, latent, noise)
164
+ style_space, latent, noise = encoder_ss(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step,real=real)
165
+ image2 = decoder(generator, style_space, latent, noise)
166
+ latent, noise = encoder_sefa(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step,real=real)
167
+ image3, _ = generator([latent], noise=noise, input_is_latent=True)
168
+ if combine:
169
+ if real_img_path:
170
+ comb_img = torch.cat((real_image, output, image1, image2, image3), 3)
171
+ else:
172
+ comb_img = torch.cat((output, image1, image2, image3), 3)
173
+ legacy.visual(comb_img, os.path.join(video_comb_path, f'{i:05d}.jpg'))
174
+ else:
175
+ legacy.visual(image1, os.path.join(video_ifg_path, f'{i:05d}.jpg'))
176
+ legacy.visual(image2, os.path.join(video_ss_path, f'{i:05d}.jpg'))
+ legacy.visual(image3, os.path.join(video_sefa_path, f'{i:05d}.jpg')) # save the sefa frames too
177
+ if combine:
178
+ cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_comb_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path.replace('ifg_', '')[:-1] + '.mp4'}"
179
+ subprocess.call(cmd, shell=True)
180
+ else:
181
+ cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ifg_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path[:-1] + '.mp4'}"
182
+ subprocess.call(cmd, shell=True)
183
+ cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ss_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ss_path[:-1] + '.mp4'}"
184
+ subprocess.call(cmd, shell=True)
+ cmd=f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_sefa_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_sefa_path[:-1] + '.mp4'}" # encode the sefa frames as well
+ subprocess.call(cmd, shell=True)
185
+
186
+ # interfacegan, stylespace, sefa
187
+ finals.append(final)
188
+
189
+ final = torch.cat(finals, 2)
190
+ legacy.visual(final, os.path.join(outdir,'final.jpg'))
191
+
192
+
193
+ if __name__ == "__main__":
194
+ main()
stylegan_human/edit/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ # empty
stylegan_human/edit/edit_config.py ADDED
@@ -0,0 +1,16 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ attr_dict = dict(
4
+ interface_gan={ # strength
5
+ 'upper_length': [-1], # strength: negative for shorter, positive for longer
6
+ 'bottom_length': [1]
7
+ },
8
+ stylespace={ # layer, strength, threshold
9
+ 'upper_length': [5, -5, 0.0028], # strength: negative for shorter, positive for longer
10
+ 'bottom_length': [3, 5, 0.003]
11
+ },
12
+ sefa={ # layer, strength
13
+ 'upper_length': [[4, 5, 6, 7], 5], # strength: negative for longer, positive for shorter
14
+ 'bottom_length': [[4, 5, 6, 7], 5]
15
+ }
16
+ )
stylegan_human/edit/edit_helper.py ADDED
@@ -0,0 +1,215 @@
1
+ # Copyright (c) SenseTime Research. All rights reserved.
2
+
3
+ from legacy import save_obj, load_pkl
4
+ import torch
5
+ from torch.nn import functional as F
6
+ import pandas as pd
7
+ from .edit_config import attr_dict
8
+ import os
9
+
10
+ def conv_warper(layer, input, style, noise):
11
+ # the conv should change
12
+ conv = layer.conv
13
+ batch, in_channel, height, width = input.shape
14
+
15
+ style = style.view(batch, 1, in_channel, 1, 1)
16
+ weight = conv.scale * conv.weight * style
17
+
18
+ if conv.demodulate:
19
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
20
+ weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
21
+
22
+ weight = weight.view(
23
+ batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
24
+ )
25
+
26
+ if conv.upsample:
27
+ input = input.view(1, batch * in_channel, height, width)
28
+ weight = weight.view(
29
+ batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
30
+ )
31
+ weight = weight.transpose(1, 2).reshape(
32
+ batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
33
+ )
34
+ out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
35
+ _, _, height, width = out.shape
36
+ out = out.view(batch, conv.out_channel, height, width)
37
+ out = conv.blur(out)
38
+
39
+ elif conv.downsample:
40
+ input = conv.blur(input)
41
+ _, _, height, width = input.shape
42
+ input = input.view(1, batch * in_channel, height, width)
43
+ out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
44
+ _, _, height, width = out.shape
45
+ out = out.view(batch, conv.out_channel, height, width)
46
+
47
+ else:
48
+ input = input.view(1, batch * in_channel, height, width)
49
+ out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
50
+ _, _, height, width = out.shape
51
+ out = out.view(batch, conv.out_channel, height, width)
52
+
53
+ out = layer.noise(out, noise=noise)
54
+ out = layer.activate(out)
55
+
56
+ return out
57
+
58
+ def decoder(G, style_space, latent, noise):
59
+ # a decoder wrapper for G: re-synthesizes an image from explicit style-space vectors
60
+ out = G.input(latent)
61
+ out = conv_warper(G.conv1, out, style_space[0], noise[0])
62
+ skip = G.to_rgb1(out, latent[:, 1])
63
+
64
+ i = 1
65
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
66
+ G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
67
+ ):
68
+ out = conv_warper(conv1, out, style_space[i], noise=noise1)
69
+ out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
70
+ skip = to_rgb(out, latent[:, i + 2], skip)
71
+ i += 2
72
+ image = skip
73
+
74
+ return image
75
+
76
+ def encoder_ifg(G, noise, attr_name, truncation=1, truncation_latent=None,
77
+ latent_dir='latent_direction/ss/',
78
+ step=0, total=0, real=False):
79
+ if not real:
80
+ styles = [noise]
81
+ styles = [G.style(s) for s in styles]
82
+ style_space = []
83
+
84
+ if truncation<1:
85
+ if not real:
86
+ style_t = []
87
+ for style in styles:
88
+ style_t.append(truncation_latent + truncation * (style - truncation_latent))
89
+ styles = style_t
90
+ else: # styles are latent (tensor: 1,18,512), for real PTI output
91
+ truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
92
+ styles = torch.add(truncation_latent,torch.mul(torch.sub(noise,truncation_latent),truncation))
93
+
94
+
95
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
96
+ if not real:
97
+ inject_index = G.n_latent
98
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
99
+ else: latent=styles
100
+
101
+ style_space.append(G.conv1.conv.modulation(latent[:, 0]))
102
+ i = 1
103
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
104
+ G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
105
+ ):
106
+ style_space.append(conv1.conv.modulation(latent[:, i]))
107
+ style_space.append(conv2.conv.modulation(latent[:, i+1]))
108
+ i += 2
109
+
110
+ # get layer, strength by dict
111
+ strength = attr_dict['interface_gan'][attr_name][0]
112
+
113
+ if step != 0 and total != 0:
114
+ strength = step / total * strength
115
+ for i in range(15):
116
+ style_vect = load_pkl(os.path.join(latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, i)))
117
+ style_vect = torch.from_numpy(style_vect).to(latent.device).float()
118
+ style_space[i] += style_vect * strength
119
+
120
+ return style_space, latent, noise
121
+
122
+ def encoder_ss(G, noise, attr_name, truncation=1, truncation_latent=None,
123
+ statics_dir="latent_direction/ss_statics",
124
+ latent_dir="latent_direction/ss/",
125
+ step=0, total=0,real=False):
126
+ if not real:
127
+ styles = [noise]
128
+ styles = [G.style(s) for s in styles]
129
+ style_space = []
130
+
131
+ if truncation<1:
132
+ if not real:
133
+ style_t = []
134
+ for style in styles:
135
+ style_t.append(
136
+ truncation_latent + truncation * (style - truncation_latent)
137
+ )
138
+ styles = style_t
139
+ else: # styles are latent (tensor: 1,18,512), for real PTI output
140
+ truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
141
+ styles = torch.add(truncation_latent,torch.mul(torch.sub(noise,truncation_latent),truncation))
142
+
143
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
144
+
145
+ if not real:
146
+ inject_index = G.n_latent
147
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
148
+ else: latent = styles
149
+
150
+ style_space.append(G.conv1.conv.modulation(latent[:, 0]))
151
+ i = 1
152
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
153
+ G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
154
+ ):
155
+ style_space.append(conv1.conv.modulation(latent[:, i]))
156
+ style_space.append(conv2.conv.modulation(latent[:, i+1]))
157
+ i += 2
158
+ # get threshold, layer, strength by dict
159
+ layer, strength, threshold = attr_dict['stylespace'][attr_name]
160
+
161
+ statis_dir = os.path.join(statics_dir, "{}_statis/{}".format(attr_name, layer))
162
+ statis_csv_path = os.path.join(statis_dir, "statis.csv")
163
+ statis_df = pd.read_csv(statis_csv_path)
164
+ statis_df = statis_df.sort_values(by='channel', ascending=True)
165
+ ch_mask = statis_df['strength'].values
166
+ ch_mask = torch.from_numpy(ch_mask).to(latent.device).float()
167
+ ch_mask = (ch_mask.abs()>threshold).float()
168
+ style_vect = load_pkl(os.path.join(latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, layer)))
169
+ style_vect = torch.from_numpy(style_vect).to(latent.device).float()
170
+
171
+ style_vect = style_vect * ch_mask
172
+
173
+ if step != 0 and total != 0:
174
+ strength = step / total * strength
175
+
176
+ style_space[layer] += style_vect * strength
177
+
178
+ return style_space, latent, noise
179
+
180
+ def encoder_sefa(G, noise, attr_name, truncation=1, truncation_latent=None,
181
+ latent_dir='latent_direction/sefa/',
182
+ step=0, total=0, real=False):
183
+ if not real:
184
+ styles = [noise]
185
+ styles = [G.style(s) for s in styles]
186
+
187
+ if truncation<1:
188
+ if not real:
189
+ style_t = []
190
+ for style in styles:
191
+ style_t.append(
192
+ truncation_latent + truncation * (style - truncation_latent)
193
+ )
194
+ styles = style_t
195
+ else:
196
+ truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
197
+ styles = torch.add(truncation_latent,torch.mul(torch.sub(noise,truncation_latent),truncation))
198
+
199
+
200
+ noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
201
+ if not real:
202
+ inject_index = G.n_latent
203
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
204
+ else: latent = styles
205
+
206
+ layer, strength = attr_dict['sefa'][attr_name]
207
+
208
+ sefa_vect = torch.load(os.path.join(latent_dir, '{}.pt'.format(attr_name))).to(latent.device).float()
209
+ if step != 0 and total != 0:
210
+ strength = step / total * strength
211
+ for l in layer:
212
+ latent[:, l, :] += (sefa_vect * strength * 2)
213
+
214
+
215
+ return latent, noise
stylegan_human/environment.yml ADDED
@@ -0,0 +1,30 @@
1
+ name: stylehuman
2
+ channels:
3
+ - pytorch
4
+ - nvidia
5
+ dependencies:
6
+ - python=3.8
7
+ - pip
8
+ - numpy>=1.20
9
+ - click>=8.0
10
+ - pillow=8.3.1
11
+ - scipy=1.7.1
12
+ - pytorch=1.9.1
13
+ - cudatoolkit=11.1
14
+ - requests=2.26.0
15
+ - tqdm=4.62.2
16
+ - ninja=1.10.2
17
+ - matplotlib=3.4.2
18
+ - imageio=2.9.0
19
+ - pip:
20
+ - imgui==1.3.0
21
+ - glfw==2.2.0
22
+ - pyopengl==3.1.5
23
+ - imageio-ffmpeg==0.4.3
24
+ - lpips==0.1.4
25
+ - pyspng
26
+ - dlib
27
+ - opencv-python
28
+ - pandas
29
+ - moviepy
30
+ - imutils