ameerazam08 committed on
Commit
e34aada
1 Parent(s): ded828f

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +3 -0
  2. .gitignore +199 -0
  3. LICENSE +21 -0
  4. README-zh.md +144 -0
  5. README.md +149 -0
  6. TalkingHead-1KH/.gitignore +2 -0
  7. TalkingHead-1KH/LICENSE.txt +42 -0
  8. TalkingHead-1KH/README.md +84 -0
  9. TalkingHead-1KH/data_list.zip +3 -0
  10. TalkingHead-1KH/data_list/small_video_ids.txt +2 -0
  11. TalkingHead-1KH/data_list/small_video_tubes.txt +4 -0
  12. TalkingHead-1KH/data_list/train_video_ids.txt +2872 -0
  13. TalkingHead-1KH/data_list/train_video_tubes.txt +3 -0
  14. TalkingHead-1KH/data_list/val_video_ids.txt +28 -0
  15. TalkingHead-1KH/data_list/val_video_tubes.txt +38 -0
  16. TalkingHead-1KH/requirements.txt +4 -0
  17. TalkingHead-1KH/teaser.gif +3 -0
  18. TalkingHead-1KH/videos_crop.py +79 -0
  19. TalkingHead-1KH/videos_download.py +56 -0
  20. TalkingHead-1KH/videos_download_and_crop.sh +10 -0
  21. TalkingHead-1KH/videos_split.sh +11 -0
  22. checkpoints/.gitkeep +0 -0
  23. data_gen/eg3d/convert_to_eg3d_convention.py +146 -0
  24. data_gen/runs/binarizer_nerf.py +335 -0
  25. data_gen/runs/binarizer_th1kh.py +100 -0
  26. data_gen/runs/nerf/process_guide.md +49 -0
  27. data_gen/runs/nerf/run.sh +51 -0
  28. data_gen/utils/mp_feature_extractors/face_landmarker.py +130 -0
  29. data_gen/utils/mp_feature_extractors/face_landmarker.task +3 -0
  30. data_gen/utils/mp_feature_extractors/mp_segmenter.py +303 -0
  31. data_gen/utils/mp_feature_extractors/selfie_multiclass_256x256.tflite +3 -0
  32. data_gen/utils/path_converter.py +24 -0
  33. data_gen/utils/process_audio/extract_hubert.py +95 -0
  34. data_gen/utils/process_audio/extract_mel_f0.py +148 -0
  35. data_gen/utils/process_audio/resample_audio_to_16k.py +49 -0
  36. data_gen/utils/process_image/extract_lm2d.py +197 -0
  37. data_gen/utils/process_image/extract_segment_imgs.py +114 -0
  38. data_gen/utils/process_image/fit_3dmm_landmark.py +369 -0
  39. data_gen/utils/process_video/euler2quaterion.py +35 -0
  40. data_gen/utils/process_video/extract_blink.py +50 -0
  41. data_gen/utils/process_video/extract_lm2d.py +164 -0
  42. data_gen/utils/process_video/extract_segment_imgs.py +494 -0
  43. data_gen/utils/process_video/fit_3dmm_landmark.py +565 -0
  44. data_gen/utils/process_video/inpaint_torso_imgs.py +193 -0
  45. data_gen/utils/process_video/resample_video_to_25fps_resize_to_512.py +87 -0
  46. data_gen/utils/process_video/split_video_to_imgs.py +53 -0
  47. data_util/face3d_helper.py +309 -0
  48. deep_3drecon/BFM/.gitkeep +0 -0
  49. deep_3drecon/BFM/basel_53201.txt +0 -0
  50. deep_3drecon/BFM/index_mp468_from_mesh35709_v1.npy +3 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ TalkingHead-1KH/data_list/train_video_tubes.txt filter=lfs diff=lfs merge=lfs -text
+ TalkingHead-1KH/teaser.gif filter=lfs diff=lfs merge=lfs -text
+ data_gen/utils/mp_feature_extractors/face_landmarker.task filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,199 @@
+ # big files
+ data_util/face_tracking/3DMM/01_MorphableModel.mat
+ data_util/face_tracking/3DMM/3DMM_info.npy
+
+ !/deep_3drecon/BFM/.gitkeep
+ deep_3drecon/BFM/Exp_Pca.bin
+ deep_3drecon/BFM/01_MorphableModel.mat
+ deep_3drecon/BFM/BFM_model_front.mat
+ deep_3drecon/network/FaceReconModel.pb
+ deep_3drecon/checkpoints/*
+
+ .vscode
+ ### Project ignore
+ /checkpoints/*
+ !/checkpoints/.gitkeep
+ /data/*
+ !/data/.gitkeep
+ infer_out
+ rsync
+ .idea
+ .DS_Store
+ bak
+ tmp
+ *.tar.gz
+ mos
+ nbs
+ /configs_usr/*
+ !/configs_usr/.gitkeep
+ /egs_usr/*
+ !/egs_usr/.gitkeep
+ /rnnoise
+ #/usr/*
+ #!/usr/.gitkeep
+ scripts_usr
+
+ # Created by .ignore support plugin (hsz.mobi)
+ ### Python template
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+ data_util/deepspeech_features/deepspeech-0.9.2-models.pbmm
+ deep_3drecon/mesh_renderer/bazel-bin
+ deep_3drecon/mesh_renderer/bazel-mesh_renderer
+ deep_3drecon/mesh_renderer/bazel-out
+ deep_3drecon/mesh_renderer/bazel-testlogs
+
+ .nfs*
+ infer_outs/*
+
+ *.pth
+ venv_113/*
+ *.pt
+ experiments/trials
+ flame_3drecon/*
+
+ temp/
+ /kill.sh
+ /datasets
+ data_util/imagenet_classes.txt
+ process_data_May.sh
+ /env_prepare_reproduce.md
+ /my_debug.py
+
+ utils/metrics/shape_predictor_68_face_landmarks.dat
+ *.mp4
+ _torchshow/
+ *.png
+ *.jpg
+
+ *.mrc
+
+ deep_3drecon/BFM/BFM_exp_idx.mat
+ deep_3drecon/BFM/BFM_front_idx.mat
+ deep_3drecon/BFM/facemodel_info.mat
+ deep_3drecon/BFM/index_mp468_from_mesh35709.npy
+ deep_3drecon/BFM/mediapipe_in_bfm53201.npy
+ deep_3drecon/BFM/std_exp.txt
+ !data/raw/examples/*
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 ZhenhuiYe
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README-zh.md ADDED
@@ -0,0 +1,144 @@
+ # Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis | ICLR 2024 Spotlight
+ [![arXiv](https://img.shields.io/badge/arXiv-Paper-%3CCOLOR%3E.svg)](https://arxiv.org/abs/2401.08503) | [![GitHub Stars](https://img.shields.io/github/stars/yerfor/Real3DPortrait)](https://github.com/yerfor/Real3DPortrait) | [English Readme](./README.md)
+
+ This repo is the official PyTorch implementation of Real3D-Portrait, for one-shot, high-realism talking portrait video synthesis. Visit our [Project Page](https://real3dportrait.github.io/) to watch demo videos, and read our [Paper](https://arxiv.org/pdf/2401.08503.pdf) for technical details.
+
+ <p align="center">
+ <br>
+ <img src="assets/real3dportrait.png" width="100%"/>
+ <br>
+ </p>
+
+ ## You may also be interested in
+ - We have released GeneFace++ ([https://github.com/yerfor/GeneFacePlusPlus](https://github.com/yerfor/GeneFacePlusPlus)), a talking-head synthesis system focused on a single target speaker, which achieves accurate lip synchronization, high video quality, and high system efficiency.
+
+
+ # Quick Start!
+ ## Environment Installation
+ Please follow the [Installation Guide](docs/prepare_env/install_guide-zh.md) to prepare the Conda environment `real3dportrait`.
+ ## Download Pre-trained & Third-Party Models
+ ### 3DMM BFM Model
+ Download the 3DMM BFM model from [Google Drive](https://drive.google.com/drive/folders/1o4t5YIw7w4cMUN4bgU9nPf6IyWVG1bEk?usp=sharing) or [BaiduYun Disk](https://pan.baidu.com/s/1aqv1z_qZ23Vp2VP4uxxblQ?pwd=m9q5) with password m9q5.
+
+
+ After downloading, put all the files in `deep_3drecon/BFM`; the file structure should look like this:
+ ```
+ deep_3drecon/BFM/
+ ├── 01_MorphableModel.mat
+ ├── BFM_exp_idx.mat
+ ├── BFM_front_idx.mat
+ ├── BFM_model_front.mat
+ ├── Exp_Pca.bin
+ ├── facemodel_info.mat
+ ├── index_mp468_from_mesh35709.npy
+ ├── mediapipe_in_bfm53201.npy
+ └── std_exp.txt
+ ```
+
+ ### Pre-trained Models
+ Download the pre-trained Real3D-Portrait from [Google Drive](https://drive.google.com/drive/folders/1MAveJf7RvJ-Opg1f5qhLdoRoC_Gc6nD9?usp=sharing) or [BaiduYun Disk](https://pan.baidu.com/s/1Mjmbn0UtA1Zm9owZ7zWNgQ?pwd=6x4f) with password 6x4f.
+
+ After downloading, put all the files in `checkpoints` and unzip them; the file structure should look like this:
+ ```
+ checkpoints/
+ ├── 240210_real3dportrait_orig
+ │   ├── audio2secc_vae
+ │   │   ├── config.yaml
+ │   │   └── model_ckpt_steps_400000.ckpt
+ │   └── secc2plane_torso_orig
+ │       ├── config.yaml
+ │       └── model_ckpt_steps_100000.ckpt
+ └── pretrained_ckpts
+     └── mit_b0.pth
+ ```
+
+ ## Inference
+ We currently provide **CLI**, **Gradio WebUI**, and **Google Colab** inference. Both audio-driven and video-driven modes are supported:
+
+ - For audio-driven, at least provide `source image` and `driving audio`
+ - For video-driven, at least provide `source image` and `driving expression video`
+
+ ### Gradio WebUI
+ Launch the Gradio WebUI, upload the materials as prompted, and click the `Generate` button to run inference:
+ ```bash
+ python inference/app_real3dportrait.py
+ ```
+
+ ### Google Colab
+ Run all the cells in this [Colab](https://colab.research.google.com/github/yerfor/Real3DPortrait/blob/main/inference/real3dportrait_demo.ipynb).
+
+ ### CLI Inference
+ First, switch to the project root directory and activate the Conda environment:
+ ```bash
+ cd <Real3DPortraitRoot>
+ conda activate real3dportrait
+ export PYTHONPATH=./
+ ```
+ For audio-driven mode, at least provide a source image and driving audio:
+ ```bash
+ python inference/real3d_infer.py \
+ --src_img <PATH_TO_SOURCE_IMAGE> \
+ --drv_aud <PATH_TO_AUDIO> \
+ --drv_pose <PATH_TO_POSE_VIDEO, OPTIONAL> \
+ --bg_img <PATH_TO_BACKGROUND_IMAGE, OPTIONAL> \
+ --out_name <PATH_TO_OUTPUT_VIDEO, OPTIONAL>
+ ```
+ For video-driven mode, at least provide a source image and a driving expression video (passed as the `drv_aud` parameter):
+ ```bash
+ python inference/real3d_infer.py \
+ --src_img <PATH_TO_SOURCE_IMAGE> \
+ --drv_aud <PATH_TO_EXP_VIDEO> \
+ --drv_pose <PATH_TO_POSE_VIDEO, OPTIONAL> \
+ --bg_img <PATH_TO_BACKGROUND_IMAGE, OPTIONAL> \
+ --out_name <PATH_TO_OUTPUT_VIDEO, OPTIONAL>
+ ```
+ Notes on some optional parameters:
+ - `--drv_pose` provides pose motion from a video; if unspecified, the pose stays static
+ - `--bg_img` provides the background; if unspecified, the background is extracted from the source image
+ - `--mouth_amp` mouth amplitude; larger values open the mouth wider
+ - `--map_to_init_pose` when `True`, the pose of the first frame is mapped to the source pose, and subsequent frames are transformed in the same way
+ - `--temperature` the sampling temperature of audio2motion; higher values give more diverse but less accurate results
+ - `--out_name` if unspecified, results are saved in `infer_out/tmp/`
+ - `--out_mode` `final` outputs only the talking-head video; `concat_debug` also outputs visualizations of some intermediate results
+
+ Example command:
+ ```bash
+ python inference/real3d_infer.py \
+ --src_img data/raw/examples/Macron.png \
+ --drv_aud data/raw/examples/Obama_5s.wav \
+ --drv_pose data/raw/examples/May_5s.mp4 \
+ --bg_img data/raw/examples/bg.png \
+ --out_name output.mp4 \
+ --out_mode concat_debug
+ ```
+
+ ## ToDo
+ - [x] **Release Pre-trained weights of Real3D-Portrait.**
+ - [x] **Release Inference Code of Real3D-Portrait.**
+ - [x] **Release Gradio Demo of Real3D-Portrait.**
+ - [x] **Release Google Colab of Real3D-Portrait.**
+ - [ ] **Release Training Code of Real3D-Portrait.**
+
+ # Citation
+ If this repo is helpful to your work, please consider citing:
+ ```
+ @article{ye2024real3d,
+   title={Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis},
+   author={Ye, Zhenhui and Zhong, Tianyun and Ren, Yi and Yang, Jiaqi and Li, Weichuang and Huang, Jiawei and Jiang, Ziyue and He, Jinzheng and Huang, Rongjie and Liu, Jinglin and others},
+   journal={arXiv preprint arXiv:2401.08503},
+   year={2024}
+ }
+ @article{ye2023geneface++,
+   title={GeneFace++: Generalized and Stable Real-Time Audio-Driven 3D Talking Face Generation},
+   author={Ye, Zhenhui and He, Jinzheng and Jiang, Ziyue and Huang, Rongjie and Huang, Jiawei and Liu, Jinglin and Ren, Yi and Yin, Xiang and Ma, Zejun and Zhao, Zhou},
+   journal={arXiv preprint arXiv:2305.00787},
+   year={2023}
+ }
+ @article{ye2023geneface,
+   title={GeneFace: Generalized and High-Fidelity Audio-Driven 3D Talking Face Synthesis},
+   author={Ye, Zhenhui and Jiang, Ziyue and Ren, Yi and Liu, Jinglin and He, Jinzheng and Zhao, Zhou},
+   journal={arXiv preprint arXiv:2301.13430},
+   year={2023}
+ }
+ ```
README.md ADDED
@@ -0,0 +1,149 @@
+ # Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis | ICLR 2024 Spotlight
+ [![arXiv](https://img.shields.io/badge/arXiv-Paper-%3CCOLOR%3E.svg)](https://arxiv.org/abs/2401.08503) | [![GitHub Stars](https://img.shields.io/github/stars/yerfor/Real3DPortrait)](https://github.com/yerfor/Real3DPortrait) | [中文文档](./README-zh.md)
+
+ This is the official PyTorch implementation of Real3D-Portrait, for one-shot, high-realism talking portrait synthesis. You can visit our [Demo Page](https://real3dportrait.github.io/) to watch demo videos and read our [Paper](https://arxiv.org/pdf/2401.08503.pdf) for technical details.
+
+ <p align="center">
+ <br>
+ <img src="assets/real3dportrait.png" width="100%"/>
+ <br>
+ </p>
+
+ ## 🔥 Update
+ - \[2024.07.02\] We have released the training code of the whole system, including the audio-to-motion model, the image-to-plane model, the secc2plane model, and the secc2plane_torso model; please refer to `docs/train_models`. We have also released the code to preprocess and binarize the dataset; please refer to `docs/process_data`. Thanks for your patience!
+
+ ## You may also be interested in
+ - We have released GeneFace++ ([https://github.com/yerfor/GeneFacePlusPlus](https://github.com/yerfor/GeneFacePlusPlus)), a NeRF-based person-specific talking face system, which aims at producing high-quality talking face videos with extreme identity similarity to the target person.
+
+ # Quick Start!
+ ## Environment Installation
+ Please refer to the [Installation Guide](docs/prepare_env/install_guide.md) to prepare the Conda environment `real3dportrait`.
+ ## Download Pre-trained & Third-Party Models
+ ### 3DMM BFM Model
+ Download the 3DMM BFM model from [Google Drive](https://drive.google.com/drive/folders/1o4t5YIw7w4cMUN4bgU9nPf6IyWVG1bEk?usp=sharing) or [BaiduYun Disk](https://pan.baidu.com/s/1aqv1z_qZ23Vp2VP4uxxblQ?pwd=m9q5) with password m9q5.
+
+
+ Put all the files in `deep_3drecon/BFM`; the file structure should look like this:
+ ```
+ deep_3drecon/BFM/
+ ├── 01_MorphableModel.mat
+ ├── BFM_exp_idx.mat
+ ├── BFM_front_idx.mat
+ ├── BFM_model_front.mat
+ ├── Exp_Pca.bin
+ ├── facemodel_info.mat
+ ├── index_mp468_from_mesh35709.npy
+ ├── mediapipe_in_bfm53201.npy
+ └── std_exp.txt
+ ```
+
+ ### Pre-trained Real3D-Portrait
+ Download the pre-trained Real3D-Portrait from [Google Drive](https://drive.google.com/drive/folders/1MAveJf7RvJ-Opg1f5qhLdoRoC_Gc6nD9?usp=sharing) or [BaiduYun Disk](https://pan.baidu.com/s/1Mjmbn0UtA1Zm9owZ7zWNgQ?pwd=6x4f) with password 6x4f.
+
+ Put the zip files in `checkpoints` and unzip them; the file structure should look like this:
+ ```
+ checkpoints/
+ ├── 240210_real3dportrait_orig
+ │   ├── audio2secc_vae
+ │   │   ├── config.yaml
+ │   │   └── model_ckpt_steps_400000.ckpt
+ │   └── secc2plane_torso_orig
+ │       ├── config.yaml
+ │       └── model_ckpt_steps_100000.ckpt
+ └── pretrained_ckpts
+     └── mit_b0.pth
+ ```
+
+ ## Inference
+ Currently, we provide **CLI**, **Gradio WebUI**, and **Google Colab** inference. We support both audio-driven and video-driven modes:
+
+ - For audio-driven, at least prepare `source image` and `driving audio`
+ - For video-driven, at least prepare `source image` and `driving expression video`
+
+ ### Gradio WebUI
+ Run the Gradio WebUI demo, upload your materials on the webpage, and click the `Generate` button to run inference:
+ ```bash
+ python inference/app_real3dportrait.py
+ ```
+
+ ### Google Colab
+ Run all the cells in this [Colab](https://colab.research.google.com/github/yerfor/Real3DPortrait/blob/main/inference/real3dportrait_demo.ipynb).
+
+ ### CLI Inference
+ First, switch to the project root and activate the Conda environment:
+ ```bash
+ cd <Real3DPortraitRoot>
+ conda activate real3dportrait
+ export PYTHONPATH=./
+ ```
+ For audio-driven mode, provide a source image and driving audio:
+ ```bash
+ python inference/real3d_infer.py \
+ --src_img <PATH_TO_SOURCE_IMAGE> \
+ --drv_aud <PATH_TO_AUDIO> \
+ --drv_pose <PATH_TO_POSE_VIDEO, OPTIONAL> \
+ --bg_img <PATH_TO_BACKGROUND_IMAGE, OPTIONAL> \
+ --out_name <PATH_TO_OUTPUT_VIDEO, OPTIONAL>
+ ```
+ For video-driven mode, provide a source image and a driving expression video (passed as the `--drv_aud` parameter):
+ ```bash
+ python inference/real3d_infer.py \
+ --src_img <PATH_TO_SOURCE_IMAGE> \
+ --drv_aud <PATH_TO_EXP_VIDEO> \
+ --drv_pose <PATH_TO_POSE_VIDEO, OPTIONAL> \
+ --bg_img <PATH_TO_BACKGROUND_IMAGE, OPTIONAL> \
+ --out_name <PATH_TO_OUTPUT_VIDEO, OPTIONAL>
+ ```
+ Some optional parameters:
+ - `--drv_pose` provides motion pose information; defaults to a static pose
+ - `--bg_img` provides background information; defaults to a background extracted from the source image
+ - `--mouth_amp` mouth amplitude; a higher value leads to a wider mouth opening
+ - `--map_to_init_pose` when set to `True`, the initial pose will be mapped to the source pose, and subsequent poses will be transformed in the same way
+ - `--temperature` the sampling temperature of audio2motion; higher values give more diverse results at the expense of accuracy (see the sketch after this file's diff)
+ - `--out_name` when not assigned, the results will be stored at `infer_out/tmp/`
+ - `--out_mode` when `final`, only the final talking-head video is output; when `concat_debug`, visualizations of several intermediate results are also output
+
+ Command-line example:
+ ```bash
+ python inference/real3d_infer.py \
+ --src_img data/raw/examples/Macron.png \
+ --drv_aud data/raw/examples/Obama_5s.wav \
+ --drv_pose data/raw/examples/May_5s.mp4 \
+ --bg_img data/raw/examples/bg.png \
+ --out_name output.mp4 \
+ --out_mode concat_debug
+ ```
+
+ # ToDo
+ - [x] **Release Pre-trained weights of Real3D-Portrait.**
+ - [x] **Release Inference Code of Real3D-Portrait.**
+ - [x] **Release Gradio Demo of Real3D-Portrait.**
+ - [x] **Release Google Colab of Real3D-Portrait.**
+ - [ ] **Release Training Code of Real3D-Portrait.**
+
+ # Disclaimer
+ Any organization or individual is prohibited from using any technology mentioned in this paper to generate someone's talking video without his/her consent, including but not limited to government leaders, political figures, and celebrities. If you do not comply with this item, you could be in violation of copyright laws.
+
+ # Citation
+ If you found this repo helpful to your work, please consider citing our work:
+ ```
+ @article{ye2024real3d,
+   title={Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis},
+   author={Ye, Zhenhui and Zhong, Tianyun and Ren, Yi and Yang, Jiaqi and Li, Weichuang and Huang, Jiawei and Jiang, Ziyue and He, Jinzheng and Huang, Rongjie and Liu, Jinglin and others},
+   journal={arXiv preprint arXiv:2401.08503},
+   year={2024}
+ }
+ @article{ye2023geneface++,
+   title={GeneFace++: Generalized and Stable Real-Time Audio-Driven 3D Talking Face Generation},
+   author={Ye, Zhenhui and He, Jinzheng and Jiang, Ziyue and Huang, Rongjie and Huang, Jiawei and Liu, Jinglin and Ren, Yi and Yin, Xiang and Ma, Zejun and Zhao, Zhou},
+   journal={arXiv preprint arXiv:2305.00787},
+   year={2023}
+ }
+ @article{ye2023geneface,
+   title={GeneFace: Generalized and High-Fidelity Audio-Driven 3D Talking Face Synthesis},
+   author={Ye, Zhenhui and Jiang, Ziyue and Ren, Yi and Liu, Jinglin and He, Jinzheng and Zhao, Zhou},
+   journal={arXiv preprint arXiv:2301.13430},
+   year={2023}
+ }
+ ```
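For readers unfamiliar with sampling temperature (the `--temperature` flag above), here is a minimal, generic sketch of temperature-scaled sampling. It illustrates the general technique only; the repo's actual audio2motion sampler lives in `inference/real3d_infer.py` and may differ, and the function name below is hypothetical:

```python
# Generic temperature-sampling sketch (illustrative only; not the repo's
# actual audio2motion sampler).
import numpy as np

def sample_with_temperature(logits: np.ndarray, temperature: float) -> int:
    """Sample an index from logits softened by `temperature`.

    temperature -> 0 approaches argmax (accurate, deterministic);
    temperature > 1 flattens the distribution (diverse, less accurate).
    """
    scaled = logits / max(temperature, 1e-8)
    probs = np.exp(scaled - scaled.max())  # subtract max for numerical stability
    probs /= probs.sum()
    return int(np.random.choice(len(probs), p=probs))

logits = np.array([2.0, 1.0, 0.5])
print(sample_with_temperature(logits, temperature=0.2))  # almost always index 0
print(sample_with_temperature(logits, temperature=2.0))  # noticeably more varied
```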
TalkingHead-1KH/.gitignore ADDED
@@ -0,0 +1,2 @@
+ data/
+ .DS_Store
TalkingHead-1KH/LICENSE.txt ADDED
@@ -0,0 +1,42 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: MIT
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a
+ # copy of this software and associated documentation files (the "Software"),
+ # to deal in the Software without restriction, including without limitation
+ # the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ # and/or sell copies of the Software, and to permit persons to whom the
+ # Software is furnished to do so, subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in
+ # all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ # DEALINGS IN THE SOFTWARE.
+ /*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
TalkingHead-1KH/README.md ADDED
@@ -0,0 +1,84 @@
+ ## TalkingHead-1KH Dataset
+ ![Python 3.7](https://img.shields.io/badge/python-3.7-green.svg?style=plastic)
+ ![License CC](https://img.shields.io/badge/license-CC-green.svg?style=plastic)
+ ![Format MP4](https://img.shields.io/badge/format-MP4-green.svg?style=plastic)
+ ![Resolution 512&times;512](https://img.shields.io/badge/resolution-512&times;512-green.svg?style=plastic)
+ ![Videos 500k](https://img.shields.io/badge/videos-500,000-green.svg?style=plastic)
+
+ <img src='teaser.gif' width='800'/>
+
+
+ TalkingHead-1KH is a talking-head dataset consisting of YouTube videos, originally created as a benchmark for face-vid2vid:
+
+ > **One-Shot Free-View Neural Talking-Head Synthesis for Video Conferencing**<br>
+ > Ting-Chun Wang (NVIDIA), Arun Mallya (NVIDIA), Ming-Yu Liu (NVIDIA)<br>
+ > https://nvlabs.github.io/face-vid2vid/<br>
+ > https://arxiv.org/abs/2011.15126.pdf
+
+ The dataset consists of 500k video clips, of which about 80k have resolution greater than 512x512. Only videos under permissive licenses are included. Note that the number of videos differs from that in the original paper because a more robust preprocessing script was used to split the videos.
+ For business inquiries, please visit our website and submit the form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/).
+
+
+ ## Download
+ ### Unzip the video metadata
+ First, unzip the metadata and put it under the root directory:
+ ```bash
+ unzip data_list.zip
+ ```
+
+ ### Unit test
+ This step downloads a small subset of the dataset to verify that the scripts work on your machine. You can skip this step if you want to directly download the entire dataset.
+ ```bash
+ bash videos_download_and_crop.sh small
+ ```
+ The processed clips should appear in `small/cropped_clips`.
+
+ ### Download the entire dataset
+ Please run
+ ```bash
+ bash videos_download_and_crop.sh train
+ ```
+ The script will automatically download the YouTube videos, split them into short clips, and then crop and trim them to include only the face regions. The final processed clips should appear in `train/cropped_clips`.
+
+
+ ## Evaluation
+ To download the evaluation set, which consists of only 1080p videos, please run
+ ```bash
+ bash videos_download_and_crop.sh val
+ ```
+ The processed clips should appear in `val/cropped_clips`.
+
+ We also provide the reconstruction results synthesized by our model [here](https://drive.google.com/file/d/1BX9zaNL_zowTDruvRB3KvebaSUi3WHWc/view?usp=sharing).
+ For each video, we use only the first frame to reconstruct all the following frames.
+
+ Furthermore, for models trained using the VoxCeleb2 dataset, we also provide comparisons using another model trained on the VoxCeleb2 dataset.
+ Please find the reconstruction results [here](https://drive.google.com/file/d/1HVCFj7WOy9KHP1J76wn-ZExh-nQnff9g/view?usp=sharing).
+
+
+ ## Licenses
+ The individual videos were published on YouTube by their respective authors under the [Creative Commons BY 3.0](https://creativecommons.org/licenses/by/3.0/legalcode) license.
+ The metadata file, the download script file, the processing script file, and the documentation file are made available under the [MIT](LICENSE.txt) license. You can **use, redistribute, and adapt them**, as long as you (a) give appropriate credit by **citing our paper**, (b) **indicate any changes** that you've made, and (c) distribute any derivative works **under the same license**.
+
+
+ ## Privacy
+ When collecting the data, we were careful to only include videos that &ndash; to the best of our knowledge &ndash; were intended for free use and redistribution by their respective authors. That said, we are committed to protecting the privacy of individuals who do not wish their videos to be included.
+
+ If you would like to remove your video from the dataset, you can either
+
+ 1. Go to YouTube and change the license of your video, or remove your video entirely.
+ 2. Contact [[email protected]](mailto:[email protected]). Please include your YouTube video link in the email.
+
+
+ ## Acknowledgements
+ This webpage borrows heavily from the [FFHQ-dataset](https://github.com/NVlabs/ffhq-dataset) page.
+
+ ## Citation
+ If you use this dataset for your work, please cite
+ ```
+ @inproceedings{wang2021facevid2vid,
+   title={One-Shot Free-View Neural Talking-Head Synthesis for Video Conferencing},
+   author={Ting-Chun Wang and Arun Mallya and Ming-Yu Liu},
+   booktitle={CVPR},
+   year={2021}
+ }
+ ```
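As a rough illustration of the download step that `videos_download_and_crop.sh` automates, the sketch below fetches a single video ID with the `yt-dlp` library. This is an assumption-laden stand-in, not the repo's `videos_download.py` (which is the canonical implementation); the helper name and output layout are hypothetical:

```python
# Hypothetical single-video download sketch using yt-dlp; the repo's own
# videos_download.py is the canonical path, this only illustrates the step.
from yt_dlp import YoutubeDL  # pip install yt-dlp

def download_video(video_id: str, out_dir: str = "data/videos") -> None:
    """Fetch one YouTube video from the ID lists into `out_dir`."""
    opts = {
        "format": "bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4",
        "outtmpl": f"{out_dir}/%(id)s.%(ext)s",  # yt-dlp creates the directory
    }
    with YoutubeDL(opts) as ydl:
        ydl.download([f"https://www.youtube.com/watch?v={video_id}"])

# e.g. the first ID in data_list/small_video_ids.txt:
download_video("--Y9imYnfBw")
```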
TalkingHead-1KH/data_list.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb6179ecac9f08c7c2d960cb8a36354e9fa2edde7215d1fc5a20d6a342050f1a
+ size 6822806
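The three lines above are not the zip itself but a Git LFS pointer: a spec version, the SHA-256 of the real content, and its size in bytes (the `.gitattributes` hunk earlier routes such paths through LFS). A minimal sketch of reading those fields, where `parse_lfs_pointer` is a hypothetical helper, not part of this repo:

```python
# Minimal Git LFS pointer parser (hypothetical helper, not part of this repo).
def parse_lfs_pointer(text: str) -> dict:
    """Split the 'key value' lines of an LFS pointer file into a dict."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:fb6179ecac9f08c7c2d960cb8a36354e9fa2edde7215d1fc5a20d6a342050f1a
size 6822806"""
print(parse_lfs_pointer(pointer))  # {'oid': 'fb61...', 'size': 6822806}
```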
TalkingHead-1KH/data_list/small_video_ids.txt ADDED
@@ -0,0 +1,2 @@
+ --Y9imYnfBw
+ -7TMJtnhiPM
TalkingHead-1KH/data_list/small_video_tubes.txt ADDED
@@ -0,0 +1,4 @@
+ --Y9imYnfBw_0000, 720, 1280, 0, 271, 504, 63, 792, 351
+ --Y9imYnfBw_0000, 720, 1280, 1015, 1107, 488, 23, 824, 359
+ -7TMJtnhiPM_0000, 720, 1280, 1202, 1607, 345, 26, 857, 538
+ -7TMJtnhiPM_0000, 720, 1280, 1608, 1674, 467, 52, 851, 436
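Each tube line appears to pack one face track: a clip name plus eight integers. Reading them as (frame height, frame width, start frame, end frame, crop left, top, right, bottom) is consistent with the examples above, e.g. the first row describes a 288x288 box inside a 1280x720 frame, but the field names below are my assumption; `videos_crop.py` in this folder is the authoritative consumer of this format.

```python
# Sketch of parsing one tube line; field names are assumed from the examples
# (videos_crop.py is the authoritative reader of this format).
from dataclasses import dataclass

@dataclass
class Tube:
    clip_name: str   # "<video_id>_<clip_idx>"
    height: int      # source frame height
    width: int       # source frame width
    start_frame: int
    end_frame: int
    left: int        # crop box, pixel coordinates
    top: int
    right: int
    bottom: int

def parse_tube(line: str) -> Tube:
    name, *nums = [field.strip() for field in line.split(",")]
    h, w, t0, t1, x0, y0, x1, y1 = map(int, nums)
    return Tube(name, h, w, t0, t1, x0, y0, x1, y1)

tube = parse_tube("--Y9imYnfBw_0000, 720, 1280, 0, 271, 504, 63, 792, 351")
print(tube.right - tube.left, tube.bottom - tube.top)  # 288 288: square face crop
```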
TalkingHead-1KH/data_list/train_video_ids.txt ADDED
@@ -0,0 +1,2872 @@
+ --Y9imYnfBw
+ -5Lm-oibjJQ
+ -7TMJtnhiPM
+ -8lgFak3RwU
+ -9qRhqjD7PY
+ -A0jrCGS_TE
+ -B1Z9vrjpgg
+ -B9PTg7XynE
+ -BNR218UFX4
+ -BPwyGVD2ec
+ -DiIOp80LOo
+ -EM2xi5Jnd0
+ -FbPk4SmV0M
+ -GZFS7r2GJ0
+ -GtLyhxJ4V4
+ -II0hSGU52I
+ -J3Z1m6vrCs
+ -J9ECsMrNkY
+ -KBiHKx54sg
+ -KL3wSSFx10
+ -Qi6_EcnYEI
+ -RuxUoQQaT0
+ -TNWOtcCbOU
+ -UrrtxGsmoU
+ -ZqVCfsj3yg
+ -_A67wxucLo
+ -_KOlZjGD9c
+ -a7oWdSyePY
+ -acp9Dk7Ve4
+ -axp0KRQJYc
+ -ayP3gFLy9s
+ -cNe_z2qsGQ
+ -dvajjXM2dg
+ -dxGq0Mu0oY
+ -flEiFmuh64
+ -gnJtZFyzZA
+ -hLsqlKm-A4
+ -mingboa4sM
+ -n-HQiT-mkw
+ -o8Ns4hvhr0
+ -qsTrNdfd1w
+ -r-ckuF3JSg
+ -tIO8GnrSJM
+ -xlo-qTzC7s
+ -yhJf04a1mI
+ -zqAjBpG0Jw
+ 0-FQbhkWYuY
+ 04-JxYnzcq0
+ 04BN6UOVKOM
+ 04WJEEb33CY
+ 04fidgplUXU
+ 05e4nROsPHM
+ 06RuVZUbUc8
+ 07F4x2eeD1k
+ 0B5ojfWryqA
+ 0Fz4oferM-c
+ 0H-cSZFZq_4
+ 0ND-w_eEOQw
+ 0NLQfXOo0dM
+ 0OaM12UIeVA
+ 0OzxymzEo-A
+ 0P-9yxO2df4
+ 0PffDSC_cLU
+ 0PtUxdA35Zs
+ 0RQl9e11aeE
+ 0S269HFjKx0
+ 0SKHozldsNM
+ 0S_QNLx2S7E
+ 0X6ZN_eRaSI
+ 0_Jci9-d2VY
+ 0_udiqKlYuU
+ 0aHSV7nbEvE
+ 0aZJJHF4K8U
+ 0aiKZGqjvL4
+ 0bA8z8qYmdo
+ 0beHbJDqlOs
+ 0cSDEZ6NxM8
+ 0cfM5xNaaSQ
+ 0ez7IiBfCGE
+ 0hhbwkQ06EI
+ 0hmQbZUYG4o
+ 0kLlrDNq9RA
+ 0lOkJkh2Hwg
+ 0lcaZupVMmo
+ 0oE11MGQRZM
+ 0pQvRqU43eU
+ 0pe-7C_c-fY
+ 0rZJhc1FeU4
+ 0t9j7CfXTcM
+ 0uGzSXRi7qQ
+ 0ueGFLVFi80
+ 0zTkQXcIaqc
+ 1-YOlGoKxeM
+ 13FSE5dCvvM
+ 1653Kz-SnxA
+ 16uK2Gbyk2k
+ 18Kl4qkvGUU
+ 18Ts-YxUIts
+ 19baUOGVR2M
+ 1ADqe6vt0xE
+ 1BQNyk_BgDQ
+ 1BuXth0dz5s
+ 1C7do4tIrfo
+ 1DgBEF9zbvc
+ 1FBVcxb4PQA
+ 1FmSxmpitBA
+ 1HIHu4mxDLE
+ 1IJ3ISINhnU
+ 1JSocaAw--A
+ 1JwEvzz-kf0
+ 1MibRcX7KUM
+ 1MvtBdLyAKc
+ 1O3ghiyirvU
+ 1OUkXFsyyP0
+ 1PDs1mL7TG0
+ 1PV7Hy_8fhA
+ 1R5dPw4sYrE
+ 1SLkDhvOnNQ
+ 1UHlG0puRUM
+ 1UyhO1hMhrU
+ 1WOs6S0VrlA
+ 1_rRguH_Vx4
+ 1aQGw_-I25I
+ 1aY5n5H0ugE
+ 1bMYn6Vb-Mk
+ 1bwUe85Mg4U
+ 1h3J3fDsupc
+ 1hd5majTxeQ
+ 1i8607wMy68
+ 1iXuJgqZhik
+ 1izPOehU6zA
+ 1k3DrJOQ008
+ 1lPU8Tw928Q
+ 1mAMa9hn-18
+ 1oPgMsUmC6Y
+ 1q7yGJbX1sI
+ 1rxyrZUtm_g
+ 1tZaTf21D64
+ 1wKWC0EzK_s
+ 1wsOU17LXTU
+ 1yHWhXaXxFk
+ 1yP_MElLiKM
+ 1z-0ranXCyc
+ 1z2ZTPZEx1E
+ 1z8sqW5xIrA
+ 216-fEvtTag
+ 2250kOXVaGE
+ 22AUNaHGGz8
+ 22tF3y_epZs
+ 23eZ2Hjh3gY
+ 24paFjOVKoE
+ 264aaUUvB9U
+ 2CQgBtUJNBc
+ 2CRIlwxj6uo
+ 2ChRXYtCCXs
+ 2DXm-gBAbDQ
+ 2EgGqTDAiC4
+ 2H7qsNMyxvU
+ 2HPH4qtE95k
+ 2Ix4h6gaa1w
+ 2JgN7AZO_6Y
+ 2KTA78QK5-A
+ 2KmXllrBNHw
+ 2L-QZfbDm_s
+ 2LdAF2dHHFU
+ 2Li-PLW3Aew
+ 2NV_nn8apOA
+ 2O_SN4pg-lk
+ 2ObBpwabNmg
+ 2OfVzz2aZ8c
+ 2PejNMim-YQ
+ 2PqqZcC31ys
+ 2R3GNE3ob-4
+ 2Rtp8zWE6nQ
+ 2T2f2cENuiM
+ 2TmfeHf8Rqw
+ 2VLez1fkVq4
+ 2VdwqgI2l2o
+ 2bAuuK1uU1M
+ 2d9BcdLC4dA
+ 2dt6XZlI-Lg
+ 2e6a4eog8jc
+ 2eZgkkbNq54
+ 2eaxjEpQPkk
+ 2ep9Eb36OCY
+ 2gIkmyxa2ds
+ 2gMaIjvjCFo
+ 2k7VhqEg7ws
+ 2llLx_WkF3Q
+ 2llMolwKs88
+ 2mLsOuOve8k
+ 2nhDjEnUslE
+ 2nj6tpVKUxQ
+ 2qDH4bPvfMw
+ 2qVqaA1hn7A
+ 2rxU553pKgs
+ 30QwIdz8Vjc
+ 30gEiweaAVQ
+ 32DCEzpcVEY
+ 35ELLdyGmpc
+ 37QWwuNFVmU
+ 39Axc3FIu9A
+ 3AJdjwSsKoI
+ 3AoYXlMCioI
+ 3BkL0UXgNDs
+ 3C0T230n-mo
+ 3CGECvfakGM
+ 3EFiRh-y4L0
+ 3EJtdIIh43s
+ 3EYU3VTI3IU
+ 3EsqpF-W_wQ
+ 3FKX7kcU4hA
+ 3GUcVRz_JyA
+ 3I2jJpnw_xY
+ 3KNGPFM4d6c
+ 3KtIBPwaRkk
+ 3NJN-C1R9gA
+ 3Qlr9YoHBkg
+ 3RITVJy7ogI
+ 3Tyan5xgQ4Y
+ 3UYOtcC00L4
+ 3YE8YLldLu8
+ 3Yepui-bWyw
+ 3ZLYs6Wj51s
+ 3ZQL-3ElxpY
+ 3ZlXiEZb3k0
+ 3_celBcyBJE
+ 3fyphrZ-yUc
+ 3gCA0Z4y7wA
+ 3hPHW74cBRM
+ 3jESlqRuLw0
+ 3kyzHQL4s1c
+ 3l_puRAIDDU
+ 3lpnoDrt5Tw
+ 3nJexBvb5UY
+ 3oV_cljdDs8
+ 3pIR5nfFNQM
+ 3qBW_x5fatI
+ 3tKwbLJEYZg
+ 3v1tt9mD6cg
+ 3viekW3AnRA
+ 3vudNvflufc
+ 3ytLt3BDqFU
+ 3zhCbaVBEjg
+ 4-1Nci8AkAc
+ 4-7ABAFvCZE
+ 41Q97FiM2n8
+ 43wXnwHo_qU
+ 465Wt-eX2RY
+ 47OtSe4dffo
+ 47slfR-Knq0
+ 4A9OeOEnFyA
+ 4BPibf6C35E
+ 4DdTNSKleK4
+ 4Hg2Eu-F9mU
+ 4NXUGnLbl5Y
+ 4Q6NknbHNiQ
+ 4QQyjqtHwlY
+ 4QgmM1dcHMw
+ 4R4aN98Qrbo
+ 4RN-c0HdJOI
+ 4cz4pq_N--g
+ 4gxMVLXmBiE
+ 4kR-Fev28po
+ 4l515yNhqxg
+ 4lB_x0kLffU
+ 4lOnYQCxbOM
+ 4neIRMYUT0U
+ 4nqMZX542bU
+ 4oXZCuXwrXo
+ 4q7lHy-1U6o
+ 4qGWz9v-UXk
+ 4rJ_RB3Iwws
+ 4tbkGu8boyo
+ 4u7y4RPfV5c
+ 4udsKgSP7UA
+ 4ujhmvsSE9c
+ 4x234xeqtHg
+ 4xlKjVPFFv4
+ 4zzVjonyHcQ
+ 50zJWKotPP4
+ 51Qp9Z3ZlHs
+ 58GcsaJhTzM
+ 58K7XpkuMCM
+ 5JG5xwguT6I
+ 5K4dtC2A_bw
+ 5OXy29bFfPk
+ 5PGVFUADgGY
+ 5PUTMuJxqZQ
+ 5TfPoq3lkso
+ 5WZmSJYYxPk
+ 5Yz7ssYCa8Y
+ 5Z8x06jnjSk
+ 5ZYrc-3gQVo
+ 5db6EORa_0U
+ 5dsxC8M7vCM
+ 5gx3WNfa56g
+ 5icAa3G2X8E
+ 5kas2jBObUY
+ 5kuqWp4gjV4
+ 5l2j8k759-4
+ 5leH4t1V9LY
+ 5llwjtJDqMo
+ 5meC4Z61qGg
+ 5okG5zh9ePY
+ 5oybklNuCZU
+ 5qaBD538C9k
+ 5r2Y6QsFACY
+ 5s9UdpT0TOo
+ 5shU5ZVQuEc
+ 5t4cwPdsVgQ
+ 5ty6o1t2emk
+ 5u-Aw6NOIy0
+ 5vAj_wY8c7s
+ 5xeVIgWq7s4
+ 5yZEtG_r-wI
+ 6-6m2H_aE-s
+ 6-O4gWLQkgk
+ 60bkwhHzols
+ 60dSEN1RIbE
+ 627Ufg0PVkg
+ 62OgF2Pw09o
+ 62aDb2JgFQc
+ 63gG5qmwREo
+ 63iL92MjDT4
+ 63stDIiwraM
+ 64Yx3Odqo9c
+ 67zz9OzY07c
+ 681IyvC_JAg
+ 685CK3xr0jg
+ 68pNYnDuoQA
+ 69pi63RgeJ4
+ 6CIMXUDzKtQ
+ 6CS50XMGV0o
+ 6Dtn8MfMPVE
+ 6Fd-CsvZQX8
+ 6IA7AuXVSxQ
+ 6IdeRRqG1ak
+ 6JIUlIwX3Pw
+ 6JmuCmI7Iqo
+ 6KKDqqV8OUk
+ 6LljU1cDSbI
+ 6MhEveLVeO4
+ 6Nyz_yd_GJw
+ 6QgRw4lDN10
+ 6R08SayU3bU
+ 6SA9lGH3JN4
+ 6SsIE00duz0
+ 6T5iJ6TjWj0
+ 6V3kI3QBWK8
+ 6VVCVtxeq1g
+ 6YBDqpgpVck
+ 6aAa8M4vg2k
+ 6bm6Y9TaX1w
+ 6bnxY8W5otQ
+ 6frz5TaAIto
+ 6gyKLNQH44I
+ 6iXGqEq_cpI
+ 6lMj2CGW6u0
+ 6lzxVmBJIlk
+ 6mL4rjxEnbo
+ 6nDOJdyJDk0
+ 6nPPyCrkwSE
+ 6ng7_H8pdBo
+ 6oHmiJLrEAk
+ 6oLStjIxffE
+ 6p6boxrvbZk
+ 6pCBqmPozE4
+ 6ptI5B4a-ag
+ 6rKSZPwHTf8
+ 6tx3pU5F1x8
+ 6wyDTrAPV7s
+ 6xSxXiHwMrg
+ 6yt3pqCQn6s
+ 6zXMtU6jgc8
+ 7-AfS9rehcM
+ 7-ByRppD-EE
+ 7-QzoS-dW-c
+ 702SXH0JdaQ
+ 73hls2GdB5o
+ 75cv2PgOmX4
+ 75mMPO0x4Gs
+ 78UIF4JCcYc
+ 798S4UbhNE4
+ 7AjsnSEhZ-w
+ 7DJdGbYHll4
+ 7KcRJyXmuzo
+ 7MBqGLvoQG0
+ 7ONrAflL5Oc
+ 7PQ0QmUGpvw
+ 7Q2Qe-zid24
+ 7Qu_ETu3vi0
+ 7Rgh8v8Qmzg
+ 7STjD4eWMs8
+ 7UQMqOkGD8M
+ 7W9ACrwLn1A
+ 7_ppXSABYLY
+ 7bAPKKE_tzA
+ 7bC_8QTdbHM
+ 7bXAZfRTZQk
+ 7c0wYvntu8M
+ 7c7ccOHMK8o
+ 7dLu8wqYpJw
+ 7dmNo3X-Lus
+ 7eOwm6nBBGA
+ 7elaTVxAEX4
+ 7f6b7b_yzQY
+ 7flG_V2SHc0
+ 7g7RpRl-Pi8
+ 7hHOLdgvG-4
+ 7hfFxTbLFWs
+ 7jyokhjUCyk
+ 7l7GryUZGvY
+ 7mHwsV3Mb-Y
+ 7o8NcTiXGYI
+ 7oKjW1OIjuw
+ 7pg_Dgs0wUU
+ 7s_Sb4-mwes
+ 7uwUj4aX2YA
+ 7w68Up6F9ZI
+ 7zWswXer8i4
+ 8-RmR1XmaxE
+ 8-hAwTTVYFM
+ 80It12pD4j8
+ 829gaOsl7Z8
+ 86OqcZtLtgE
+ 8BZC5UBidm0
+ 8C5cn_Qj0G8
+ 8CTEl-Zhv38
+ 8DzVNa_BjOI
+ 8HbsSkuiPmg
+ 8J6FseCtEXc
+ 8JtWrFwQ-h0
+ 8KD2cPzxF3U
+ 8Kux1TQWdLU
+ 8LSK5wIjZMk
+ 8MPjG8RMYJY
+ 8N-x9QC50m0
+ 8NJ74aVTZMc
+ 8P3rViI8Xw8
+ 8SIWlQ0ZUqY
+ 8ToIwnP2a-g
+ 8TunLMoE9Xw
+ 8Uij8BYDuf8
+ 8WfV93go5TM
+ 8XJp2c05iVk
+ 8XlrNlfd-9M
+ 8_yb_nW5x6I
+ 8aafXYh_gHA
+ 8bQPi0ssTLw
+ 8dI7AzzZLXw
+ 8e1BMiU951c
+ 8gg-oKufUo4
+ 8gtpnlVb31U
+ 8lrlXoXGQo0
+ 8pcELaZV2b0
+ 8qUQwmwC7Oc
+ 8quGD9W7B2I
+ 8rwcfIrAXtA
+ 8uyPpy2ejA4
+ 8vzKZVqXmo4
+ 8xhqhC_PHzE
+ 8xo4s6tYzzs
+ 9-EtiWDJbJw
+ 9-a3sSZeXDE
+ 9-nsXlNXRDw
+ 90dka2zrP1Y
+ 90sJHDwKmSg
+ 91Zu4JRnfxc
+ 91z62p7t-AU
+ 95hYfRw1aHA
+ 95zvFw1VkqQ
+ 97C_sZ2821s
+ 9Dm1Sekkcdw
+ 9G4s_qIxJYk
+ 9I8mQpFAJ50
+ 9IGRsXq9Wis
+ 9ITi5C8vHpw
+ 9JJ3ullABD8
+ 9K4A7e3clMM
+ 9KPyflyHP6s
+ 9KisGZnflBc
+ 9MOleOgz5To
+ 9O2D9K7l-FU
+ 9Pj-QPlN2CU
+ 9QeqkUN0bNU
+ 9S9O2T1B6xE
+ 9SflFku0eKo
+ 9UaAyI-uI30
+ 9Wb409Nlhlw
+ 9WkYKiltzJU
+ 9XYx1vUd-v8
+ 9Zb8e6nE5QI
+ 9Zuk9Huqdrg
+ 9_VLbfRXTss
+ 9awtQRbMhG4
+ 9c7zctYTBLA
+ 9dCHp07it-Q
+ 9eHXo_KFvJ0
+ 9f0eB75r-Y8
+ 9h3m1jzWWEU
+ 9kUljb9G-MY
+ 9kgiApzKDMw
+ 9mAiyn6gMJw
+ 9mnTCYsbKfw
+ 9n7hn-M4GpI
+ 9oMmZIJijgY
+ 9p8z1A3TsxU
+ 9r8yZ-68pkY
+ 9s3BPDNEJek
+ 9t7ujBSH3WM
+ 9ujM2nAMK0g
+ 9uyYxs79EcY
+ 9ychKZIG8ms
+ 9ydk7zFmOxU
+ 9ydusnvBSys
+ A-QOg-tFApA
+ A0AViKj8EGk
+ A2B-b-nfCqk
+ A2DdRsUdFeU
+ A2lEI0kaf3k
+ A3uNIgDmqwI
+ A6xjN8BqDjk
+ A7ktYbVwr90
+ AAprf4PLDM4
+ ACsoFzXDE3Q
+ ADQSqUBZjvE
+ AEKZERIDiUk
+ AHKGGtJ15o8
+ AIXQngEnJgY
+ AJa2DO_woJ4
+ AKkYU-fExWc
+ AL1pISpcG2Q
+ AP0S21vT3Co
+ AQe2ANirwW4
+ AQuAGO9ceIU
+ ARXYGhV5VFg
+ ATgNzwHSfjw
+ AUtRgfFUCl8
+ AVc5fXa0oMs
+ AWJcd3F-HPY
+ AXfRrHD4Cps
+ Ac8Fsu0WVKg
+ AccPbM4JhFI
+ Ad5je4UNgDw
+ Adgif4D3ujk
+ AfZNZ3bfJvA
+ Afy7H04X9Us
+ Ag-zqjX1TV8
+ Ag1AKIl_2GM
+ AglvA1tduMA
+ AkU94JdXbQ0
+ AnxrJiS5uKU
+ AptPjGKdaDU
+ Aqu7R_7vFKM
+ AuLoMmjFONE
+ AwvReatHB2g
+ AygCTeXnJ6o
+ AzECoalJ4WU
+ AziRcPo6rm0
+ B-8ovk81nNM
+ B-qxGhkRojc
+ B0KlNLkO3qE
+ B5uSsp0Rbbc
+ B7HzMw9rSMs
+ BAj3fHStRGI
+ BB4bpEKrOlM
+ BBWIPL66Fpc
+ BDLtYpLZfbU
+ BE4Y5Uc53Nc
+ BElqXjOG5Gk
+ BErluP3jDjw
+ BF7hcsKb1WA
+ BFWbo17t-Ig
+ BFat39XKT2E
+ BFoqBnl5XNw
+ BGJuAODr8Ks
+ BH7-eZYnJkE
+ BIuRA0GGIgk
+ BK1VxSDsCu8
+ BLcZAhQzQF0
+ BM2mqrIXY2w
+ BNgmYFwUjjw
+ BNyCZJOTRZA
+ BPLCiXRSBNk
+ BQL5wkJS4y0
+ BUouNsjhTTc
+ BXT72YlkQrA
+ BY2ADlTxdZI
+ BYgCS4of0TI
+ BYvQek24Kbs
+ Bb3XWac-WuM
+ Bc7WoDXhcjM
+ BcTOSxcv2_o
+ BcjJjiE4Ivs
+ BfdcdAIJh4g
+ BhEio_W1LU4
+ Bio8ZpEFlqY
+ BjrDmB15S-M
+ BkTiMi0Owuw
+ BlCoAfks8kk
+ BoWXd-LNnm4
+ BoXR8KrAfIQ
+ Boj9eD0Wug8
+ BowyM_Wlsd8
+ Bq0vohH2pL4
+ BsDYAcOsWqk
+ BsgJuviWgJI
+ Bv3taVzLZZU
+ Bv7wdHRhifM
+ BzAzXDgqKzc
+ Bzb0FhMqaU8
+ C2TLW8MS33E
+ C2rVYklWl8I
+ C3589eDY5rU
+ C4bevJm-MbI
+ C5WNr5vzUPs
+ C7c1LAUbSho
+ CA-0cn2Dbgo
+ CBYhVcO4WgI
+ CEJTBzMKcuk
+ CFUGsvVC9mQ
+ CG0OnKUqziA
+ CIIsZt9c8nA
+ CMAiUAvqIh4
+ CNFTLmMYY1c
+ CPckick_ioM
+ CQUP1LhDLPM
+ CWLEHmYHNro
+ CZjbKqbYS-A
+ CaD5TRQQNsI
+ Cd4ZxGNbwTw
+ CfA30p4X9g4
+ CfKy85LA_bs
+ CjnVq4zLT6s
+ Ckrrk5oBneA
+ ClVtccpCs6g
+ Clsy0PuGl2A
+ Cptzvn3nM0Q
+ Cr0cBSpnn40
+ Cr2VggKQrQg
+ CsG0Or6-SiI
+ CtpXTXjQk4o
+ CuZnbR4fb_M
+ CuoCrLWcsjI
+ CuyAI82HFe0
+ Cymmi8L0O1E
+ CyyImnREpbg
+ D0TgSpsBabY
+ D17AYqYPFDk
+ D2UT1AmyZFE
+ D4T0Ffg1I_Y
+ D5ZZUHKPC10
+ D5gW_X-Db74
+ D7wvFZjtVOw
+ D7zgzpc_PVI
+ D8TFETlLRdA
+ D9aRZXIOX5k
+ D9ocoySPGOk
+ DA2mx0793uI
+ DCUnruZponA
+ DChlO5fNMGw
+ DFqDbrTGTnY
+ DGKY7K-pyqw
+ DGoTdntvKfs
+ DKnHYcKfz6Q
+ DMVduTyjp1k
+ DPDF4odJrq8
+ DRHNdWF4Kho
+ DSoKEFb8R_w
+ DStiXbf9Mk0
+ D_obsdDVv20
+ Ddltu_Cq4E0
+ DdtEInQEQ-s
+ DeeRqZd7sCE
+ DgGKe3kCx74
+ DgpbuHAOgf8
+ Di9AwmKtblo
+ DjHPXW6Crac
+ DnOLvKEYIQI
+ DrDOMMmwPvI
+ Dtfi2BHWwaY
+ DtuJ55tmjps
+ DuNW0KQ_GZM
+ E-hVDqrQq6M
+ E-qQNDCVSnk
+ E1QbVtkza54
+ E1gXzYA0tFA
+ E2uUfyT64VY
+ E3Q61rKXhrM
+ E4zPYb7O2EU
+ E7XkvbCu-jU
+ E9tUCuAZ-LU
+ EAc4WhNQZ30
+ EB_d1jK1R44
+ ECfLtssUZa0
+ EEq8OK4BUyM
+ EFLh9Vqr-YU
+ EGCqtu8qujE
+ EH6q2YGx45M
+ EHp_I9ETmtQ
+ EHrbAhrbw9Y
+ EJMlBT6jptE
+ EJZvz09LVa4
+ EK3WoT1Gqvs
+ ENXP9HEul98
+ EQJN03a6M28
+ EQLg-kHxwCA
+ EQ_VlFQT9hg
+ EUS1m5MSt9k
+ EUqWrqb9Oug
+ EVBOSk6kEwg
+ EWSp2QMzKv8
+ EYaKFlWd2MY
+ E_LYrCtoTIA
+ EaHZLUWwxfQ
+ EaQOEXTkKYg
+ EckpALTiYhE
+ Ed4bxiP1RPM
+ EeqwFjqFvJA
+ EiW4lKrMXQ4
+ EjB9J20nulw
+ EjBkTt0LHbU
+ EnPJk_9Ug7I
+ EnU6HVRC4s0
+ EoHwvsJcBNg
+ Eojazns82hw
+ Eotj0EeepoQ
+ Eoxazjg1NUA
+ Erso0HgtV5A
+ Esu34JYC2YQ
+ EtctGvH92Ww
+ EuVBpqBgmvM
+ Eup2Ca9Kiis
+ EwXXD0uLj8I
+ Eylcb4rbLSo
+ F-GzNvvs-lU
+ F-nsVjM7FmU
+ F07fXd4vVlg
+ F0InXG0ln4Y
+ F2G-buBtp7w
+ F2yK5VkHRaA
+ F4xgvj4kSnU
+ F5KV-iaMKK0
+ F63B6wWXGtA
+ F6ShWjU7GaI
+ F7oit5SKxdw
+ FAf0YtSelug
+ FBQDiiEbknE
+ FBbfCBOJOt4
+ FEc-U45TzKM
+ FGgMrNSmMn4
+ FICT79cA3U4
+ FL9qpSH5eKw
+ FLI0WmBWWv4
+ FLIippwrXSc
+ FMIDAWVPq7c
+ FNErh9EogUg
+ FTN_93Px-Qc
+ FUSU_WYPwx4
+ FV4aEpanJ2I
+ FV7tKSeGr3Y
+ FXbC_3_8tGM
+ FYo5E7zT-vM
+ FZLwYiceIOU
+ FaoVpVXcZsA
+ FlCNvBBqIyU
+ FloFzFl0jZc
+ FnKhFaijBBI
+ FoYce_3oUGs
+ Fpo6nvSZirI
+ Ft1Nw-Hy8Ao
+ Fwh0r8YNLIU
+ FxBLtp7UpTI
+ FxQchGBQpZA
+ FxoOE2dTCHE
+ FxrCNf8utsE
+ FyPFdBhklEw
+ FzXWP7ZHs-Q
+ G-6OXtSMyNI
+ G0n970JRNII
+ G2tfebkUbPo
+ G4SMtaNDtfk
+ G5hOJXZmqPc
+ G66ClBEmWdY
+ G7GI04txkOM
+ G7T6dTs5YKw
+ G9CHdvWwzQ0
+ GAS_7760FSo
+ GBvfnfwGq5Q
+ GG0F0uXuIqQ
+ GGIZzL-1gZ0
+ GHjjXhd6WAc
+ GJar81QVmQk
+ GKCxRcuLm8o
+ GLUdt99wUY8
+ GMndqLvTqhA
+ GNx4EbTu10w
+ GOQWmjLkqOU
+ GPEavB9GXHc
+ GS1UT0mSks4
+ GTsSe03hPxY
+ GUZ4T_xFtwQ
+ GVFq0_6imAA
+ GXldrjxDZqQ
+ GZRbKJMEdk0
+ GZVxh8CQFkg
+ GbUfu1wF02s
+ GcNJkyYSmW8
+ GcayBgPOr04
+ Gd_zypjbv9E
+ Gf8lUImdL3g
+ GiShqIyw-_0
+ GnwqktjWrVM
+ GoZ6KwuAdT0
+ GpAOMD6Z_Cs
+ GsYl_thySnM
+ GtzbWxb9nuQ
+ GuVNl_oEMuQ
+ GwU1VXsDXbc
+ GxDeY_UiGBg
+ GxRgKs4TpWo
+ Gzo5PEGQpe8
+ H2LhBAi-Q8I
+ H450lZb-Mdg
+ H4TYEoft_rM
+ H6L04OEm71w
+ H6ODJtcqyTg
+ H8t_snz8B5A
+ H91qCYIfZuQ
+ HA8PjarK2mo
+ HAFKRtBHFlQ
+ HC-zhWIIC5w
+ HCMZ4s2A_k8
+ HFCmCsxt1xw
+ HFTVP9qIMPE
+ HLXDpFYmyqo
+ HNdh6Valoys
+ HOpKzDhCFtE
+ HPHNakev2ss
+ HQpZQj3TY80
+ HSjvtpwKyhU
+ HSu21_qc2kA
+ HTJyZwYPQOI
+ HUoUVlK_bHI
+ HVMqm9jlUDM
+ HWNwvBrUUGQ
+ HXSxHJO6Srg
+ HZ0pn4ijwnQ
+ HZuhPDbZtcg
+ HbKvcTZqNA0
+ Hb_SVDUmWzo
+ Hc6K7g7wqrs
+ Hcy27nbeNWY
+ HdzDCZ28cI4
+ Hg0qN4cNJfY
+ HhFPSCGRFHY
+ HhNo_IOPOtU
+ HhljUdMUbs0
+ HkfLT86wPkM
+ HmexkGBB428
+ HsGbFpi3xtk
+ Hy6QRP3ENl8
+ HyzD8pNlpwI
+ HztoBDblr8o
+ HzzUW9y9FCQ
+ I-9uLKZmxOw
+ I-HFjHKJJ7E
+ I0lqvxqEKhU
+ I1fZdwFStnY
+ I25TYNMclKk
+ I2Z6LNkwijk
+ I3RMF_9xW1o
+ I68lZ9jptWU
+ I8tkl9kVfaI
+ I9taZpV2JfU
+ IAYJhZS231s
+ IB6_L9xsnAo
+ IBESpBTIQTQ
+ IC3EX6ipxFo
+ IGEJo3QHvSI
+ IGgpoI_0oPs
+ IKMjg2fEGgE
+ ILQii0-r-bE
+ IM-TQHJKefA
+ IM1xpnkmG7o
+ IMEIBu0uULg
+ IMFI8waM8rs
+ IPPVWl-jPqk
+ IRBAZJ4lF0U
+ ISip9JRbYNs
+ ITBbGDndjGM
+ ITjHgkTTX_s
+ IaHioK3Ljz0
+ IahmVXN7xEQ
+ Iam-aEiQOeI
+ IatKu7sngG8
+ IdZPa5vWdtc
+ If2Fw0z6uxY
+ IfnTz7vZyVg
+ Iib7x8rYE7E
+ ImCLzPvVKTI
+ InL3YA_6P6s
+ InP5DEpeVSU
+ InSvBuHK4vI
+ Io6JjgckHbg
+ IsMFdiLsqbg
+ Is_C4-xmayE
+ IuuRvopzIf8
+ IxYu1FAY5qc
+ J1OGqF5Eo1k
+ J3-ySSl7ceM
+ J44SPYSVAAc
+ J4vKu-s3OqM
+ J71SYNCcRQI
+ J78srd3-odQ
+ J7BLQbvZyrU
+ J7ei1-rYHMU
+ J8ifUEgXF-o
+ JC6pZ92y-hk
+ JFkqEW-sz1c
+ JGwMIlpgR2A
+ JIEgN3las5E
+ JIvGXG4z9X4
+ JKg4o6SHbCY
+ JLEPOAlZ7LU
+ JP5ywOknF-8
+ JPP_3u8gD-U
+ JPmZtJ8vgAo
+ JQTxw8OdBKw
+ JTq75Se8vRA
+ JUM_s4uQDW4
+ JUSVl1JXYl0
+ JVSwiQ0tNLM
+ JZTlzyHnQeA
+ Je69HPxSd_c
+ JeZ5gAUnlFk
+ JfjLKBO27nw
+ JgwRwOWyR0Y
+ JjSIRfrDKF4
+ JmWnjvHEM38
+ JmjxtqnhzHI
+ JpL__knumpM
+ JqQtOGCWrQM
+ JrIwV6YniCg
+ JrXU1owDEVs
+ Jriq9eOSu7g
+ JsFEIvtKCns
+ Jtu5eFsIPmw
+ Juen1iIJQTE
+ Juv1TqsYiV4
+ JvpxC406_jA
+ JxR5EZ_GY1o
+ JxUqtbpjpqg
+ JxWjxAqCrCQ
+ K-FG2oWl-2k
+ K1hPyYEMp3s
+ K2POkUf2EUQ
+ K2QS3ZvjPMM
+ K2bh3ZJOFnI
+ K2pUtcVSXEo
+ K3c6AMXsam8
+ KAsvI2qAzlc
+ KBQRetXolA8
+ KCcenWMXQQ8
+ KD6bh5ZfS2k
+ KFDlZxR4yG4
+ KIiKNpySv6w
+ KK5120o36GM
+ KNjgy1o65SA
+ KP3ToVHnOZY
+ KQQbMdsFsdQ
+ KQkK8ThNsOY
+ KTxhy419vto
+ KZvz22uAVM4
+ K_vUbBQzFjw
+ KcdTad5fztE
+ Kd9RcLW7knw
+ Kgs0QbthCEU
+ Khnx2cNTiu0
+ Ki9AHohJpIc
+ KihglmOX7j0
+ KjMe0TXyQzM
+ KjWVlz6cAyY
+ KlC5HJFI40M
+ KlcZwSsceL0
+ KmmMz3NfoU4
+ KoqaUANGvpA
+ KqygzQmEuhA
+ Kr3OvLakOgk
+ Kr4Xe-BghFU
+ KrEzYXCjJDQ
+ KtHgPTAdfYM
+ KtVfCgNJdeg
+ KuqcVxRqQHI
+ KxbRf-ZlSSk
+ KxcNu6WZSCY
+ KyBD2AeGXIE
+ KyDTB0i_wQ0
+ KzVEGlNNXuI
+ KzVzRN5XoJs
+ Kz_wvavZp6c
+ L-bQmqP82oU
+ L0LZDwNDqRQ
+ L13WvYq8G68
+ L37uYJnDxVY
+ L5M93LmOmqw
+ L6SiuhBZWDk
+ LCsx9rVQjwE
+ LDXrXC0cPBo
+ LFBC0d4i3jE
+ LG8TfyiQr7w
+ LGumhl8-kiY
+ LH0IOQrB-NU
+ LH1WrGpM7p8
+ LHFY1Vg97AQ
+ LHNNYQ57V2c
+ LJJgAqdxBdA
+ LL-SFPRBBFw
+ LObC_A4G-_c
+ LR7ZZ5gw984
+ LRXrEiBeOXo
+ LTZPvLi3Hdc
+ LUhyxjNub8I
+ LUolzAltwKI
+ LVYVlHr2FKU
+ LXAcG9mITz0
+ Lawz1Mc16Vo
+ LbQo2tKGxV4
+ LbR6jjbgbis
+ Lbx1F4V_-8Q
+ LgT5uYkkPE4
+ Lgo_bkq9SWU
+ LhEzvegA-Dg
+ LhF_sfDfBd4
+ LiCmCW6EGzA
+ LiWszVY2lW8
+ Li_m7BVja44
+ Liv3vT9dGhU
+ LkCrGs-XeYI
+ Llq5mmhLy8s
+ LosFY2otV8E
+ Lq2icL5Y_FM
+ Lv3WAxgaZqU
+ Lx48775nwWs
+ LylMvKFdwJU
+ M5axFzT2_u0
+ M5zDCmXSejU
+ M6ZdYNFo6gM
+ M76UHFsQp2U
+ M8SkT5nE-0s
+ M8ayDH5DuJA
+ M9Xsci4JUy4
+ MCQ3H2jfCBs
+ MDyjY3uiWp0
+ MFcNY_CyXk4
+ MGGCuJdo8xs
+ MJnxzdG5QwM
+ MLUJVpk0BM8
+ MLay5YHp48w
+ MMoBEZ_d2g8
+ MMwKA-Ku1mM
+ MSr3hE3z2jw
+ MT6uJni993A
+ MTBfv2io-pQ
+ MVI_yEqA2RE
+ Makb_p6HcxE
+ Me95iJdHO18
+ MeM6r8Nj8G0
+ MfS4oDLBpp0
+ MgLHVw0tUBg
+ Mh3Dvs7DwdM
+ MimJZypAiy8
+ Misd5Qrx_CI
+ Ml1I6WEYSAY
+ MmfiHdQ4Wfs
+ MousuD_jX24
+ MpGxOR50sn4
+ Mrcn4Q50j5s
+ Mro6gxnOfus
+ Ms9K0eZLnFQ
+ MsA2OJiYApw
+ MsBU3uGpUGw
+ N15YpxEHjVs
+ N1cJakFhjNo
+ N4t59MjWdsg
+ N5esEarb5MQ
+ N8XGCs0js30
+ N9JXjNCR1EM
+ NDsl1_vHHTc
+ NISKpzp_QAM
+ NKJfBsc5kHk
+ NKKaWmoFdZA
+ NL6zaIPoU-Y
+ NMWqBL_Uhr4
+ NM_SJwSMRT8
+ NMpf6HNYIzc
+ NNK4pvyOhAU
+ NNp4yt9dHns
+ NNwM_OMLa10
+ NOYFz4DIfh8
+ NSZvUu8Q8ZQ
+ NUKIA9I6gRA
+ N_kIz8R84jU
+ Nbn1NJjbqoc
+ Nc-HP2vyKoE
+ Nchrj-dzVgs
+ NdKNfaWpCj0
+ Ndnq9Ofs2eA
+ NhOwdlKHcAE
+ NhfsI4jbWgk
+ NjN07qsdh0w
+ Nk6N7ieiphs
+ Nm2nt6dxVv0
+ NmBcsOMtKqM
+ NnPyqGW2w2Y
+ NoIHCm4wrpk
+ NoZ7ujJhb3k
+ Nom2-9WmsWU
+ NpskXvrCNA0
+ NysbSdox6zM
+ O0D0E42AA4I
+ O0hP4Hrek4s
+ O1UBCHWPIqE
+ O1d4dHwSZqs
+ O1j79d0IuhU
+ O2AnvMTbDCw
+ O2YHi_g2JuY
+ O3iPo__LYZQ
+ O3jxAN3j_P4
+ O3sFnc87STU
+ O42jXkaQtQU
+ O7dLxPQvIkI
+ O8G_glgmBcA
+ O8gZs9BCr4Y
+ OBlw3eBxHvA
+ OCQd02hORJQ
+ ODx9C7kHmWs
+ OGhvz1fwacA
+ OGzPe8LXHeA
+ OHHqegRBDWg
+ OI4sCSsyS4s
+ OLbdw1imnKY
+ OLe9xNH4G6s
+ OLfHpvJKNg0
+ OMeIMC_s0GQ
+ OMw8kl2kcZY
+ ON3YD52Df5s
+ ONvg9SbauMg
+ OOJ7OYp1gjg
+ OPzyc9rXx-I
+ OR6qP-X5fcs
+ ORObgzbi8Fc
+ OSrCb8eDWWk
+ OT_0JLIALxk
+ OUE8rKxYGLY
+ OUoQ4c1YyJM
+ OWdqgZQdMgw
+ OX0OARBqBp0
+ OYJh1xJbQys
+ OZWiY3xcr1w
+ OcPW_rtcbio
+ Odb1XiXpRTg
+ Odw7NOmXeaU
+ OgzTnofR1WY
+ OhHBPhb8a1I
+ OhsXUPDBX90
+ Oj22JVi762E
+ OmdRZ6ZM_pc
+ OnPdfl9qKRg
+ OpaFC283wJE
+ Opx0cWUuaBk
+ OqTI0KYkeJQ
+ OsBgWxoAOf0
+ OsLj4GyNvYQ
+ Osp20p7mHLw
+ OtTAH-lcO_M
+ OuP7vIfN3GI
+ OufFx-XmsLM
+ OyNBayVulb4
+ Oysv7K93B-w
+ OzIUFdCRm4o
+ Ozk5w3I6wlU
+ P-9Z6WeojxU
+ P2-SOq0SrmU
+ P9M__yYbsZ4
+ P9S15gsSywg
+ PAFybFU4fzI
+ PGQTyzsX7V8
+ PJXZQrwDPdQ
+ PJdbIALsMYQ
+ PNJghHAUlLg
+ PRgoisHRmUE
+ PT0KwTjsMUo
+ PTOZrIogdhs
+ PTaeZggiMrM
+ PUYoRT2EA5Q
+ PVLxoTzL31U
+ PXNdbOr8f9s
+ PXf59S2kFag
+ Pa-5lbobZpk
+ PaEO7DFyzZY
+ PiHMIYoV3OE
+ PjlEL4poXaU
+ Pnfh3Bxo4mE
+ PnxpH92CJOU
+ PpE-wfM5NhU
+ PtV9uxQHnGY
+ Puu_PXPw_H0
+ Pw_4_f6PQno
+ Pw_JdBkki_I
+ PxVZGdKMmvc
+ Pxg5mHeIoTA
+ PxiRvHqbQoo
+ PyNVMlDyNtg
+ PzzNuCk-e0Y
+ Q1MYDq7GgBc
+ Q5iKAqZ9yVU
+ Q71FNI--3vk
+ Q9W5Lxr-7v4
+ QEKRkIbCZEg
+ QEz1caz0loM
+ QFJZwvkJsGk
+ QFbp5scBzys
+ QGbL4LlrcIs
+ QHeYQalwm8Q
+ QIz-y9-jywM
+ QJj7WiwcadU
+ QKxRCmpAFKE
+ QL2Hb-v1r0A
+ QLunLzt4r4k
+ QMcdgAriqy0
+ QO7Jhl-r-BY
+ QP2zJO0AtlA
+ QPKKQnijnsM
+ QTp2snIa-cU
+ QWgbM9DoA7A
+ QX1d52gmEZ4
+ QZhzZtExnfc
+ QaqyAdedM0Q
+ Qd3DEZud65I
+ QgscBSUsuNU
+ Qh7rX2S4lFs
+ Qi4P0mLkkLQ
+ QkmGfY9iY9Y
+ QkwYl6HJHdc
+ Qlzca3efn6E
+ Qmp2Z7wPR4Y
+ QowV8kgwHX8
+ QpXUjqjewGU
+ QqsPM1rd688
+ Qru-q3ykC48
+ QspDZ-DYs0Y
+ R0XyMhCdSkY
+ R3QabWdSsxY
+ R5XePwAO4m0
+ R7ZX64ASSe8
+ R9KOp8PKhpY
+ R9P4_3GEjS8
+ RAMbIz3Y2JA
1261
+ RCrTM0fHg-o
1262
+ RDeLBp_-3sM
1263
+ RDkMkH4drhc
1264
+ REUvXBK7ypQ
1265
+ REzffEzpiUM
1266
+ RFM-LCECtmk
1267
+ RPTk00TE3Ak
1268
+ RQXCRoVV9Hc
1269
+ RSJQ7iFntRA
1270
+ RVG1EXFbRb0
1271
+ RVQcpNgh6sI
1272
+ RVluy0cjHbA
1273
+ RWjDH6Od5xU
1274
+ RWoOC8KvHEA
1275
+ RXEUWvpmJaI
1276
+ RY8e2Ivu4Ak
1277
+ Raiw3nozIoc
1278
+ RaqHo26ohYs
1279
+ Rb2fCxGGcHE
1280
+ RdGOK7ZAHMc
1281
+ RdhwDd8PW0Y
1282
+ Rdi5ExhmqHM
1283
+ RdlMCh2idHI
1284
+ RfP1AjOOtSE
1285
+ RfmgkgzNhYU
1286
+ Rh5qNYU-_jI
1287
+ RhcnVBgKxEg
1288
+ Rhn0fatp9PI
1289
+ Ri-M_Vo3w5A
1290
+ RjzuefWqVY8
1291
+ Rm5MIya_48o
1292
+ RovZoquZGn4
1293
+ RsC_d7GnZtI
1294
+ RsX1lwPnPPQ
1295
+ RvjlBL4A_8U
1296
+ RweXbb_OzBU
1297
+ RyiqQnCd7qQ
1298
+ RyszHongpf0
1299
+ S-imxWoyMD0
1300
+ S0MWAAykFuc
1301
+ S1MV9j0dPAQ
1302
+ S4Fje5FUgfw
1303
+ S4nfcw632Oo
1304
+ S5OzDdLlsUI
1305
+ SBUwUUIVwHM
1306
+ SCjA7BJDnEM
1307
+ SD-LjOboaE0
1308
+ SDCICtm9zXQ
1309
+ SEDDSCUJxK8
1310
+ SFE7NNxfbM4
1311
+ SH80ZuySDW4
1312
+ SLaZEauuEU8
1313
+ SNBMdDaYhZA
1314
+ SNcjWH6ZhPI
1315
+ SOUME9xzIxk
1316
+ SSGEoCsFoH4
1317
+ SSfhcpWRUrM
1318
+ SSnj8kkmNDI
1319
+ STKRq8VXzjw
1320
+ STKb-ai6874
1321
+ STWrgFYmkL4
1322
+ SVR3ZmdAV-A
1323
+ SYriZ4xtdDM
1324
+ SaznCPVAiJc
1325
+ Sd3QWZ76IZg
1326
+ SfaKxqo1NfQ
1327
+ SfkSpHzhZb8
1328
+ Sg-HIZ3qgtk
1329
+ SjNRtrZjkfE
1330
+ SjbFjIeSCf0
1331
+ SlJh09MsJ7I
1332
+ SnwbCVxeEVU
1333
+ SoeKlf4DcSE
1334
+ SqRfNG6yLEk
1335
+ SrLPH5590RU
1336
+ SsIDNPoW7q8
1337
+ Su8Q8XMQzIM
1338
+ SuYeKcei7Zo
1339
+ SvWCVOGF6vs
1340
+ SzjmpNTVH6U
1341
+ Szx43_ah4ys
1342
+ T-CAP-ULW_A
1343
+ T0qjkPboQXY
1344
+ T4kVhUwJZdo
1345
+ T6P9TCdWE64
1346
+ T9X3YhUWsDc
1347
+ TAdw9R0ku2o
1348
+ TFPS-iX0L4s
1349
+ TGCJQR2BZhc
1350
+ TIgRgGQ2azQ
1351
+ TJXW9hpOlnI
1352
+ TMjlO7UUubU
1353
+ TNEWst_1m4s
1354
+ TOT4eRF4oCU
1355
+ TOzkrCUSlFg
1356
+ TP2wD0LX4T8
1357
+ TQ2NfO8grLs
1358
+ TQ7NqpFMbFs
1359
+ TQHxvPooKZc
1360
+ TQzJKL_4l44
1361
+ TS5BXMeG890
1362
+ TUVeg-x9Za4
1363
+ TX0v0pIVKUY
1364
+ TXjwmCoRmhM
1365
+ TckoXQoHj7c
1366
+ Tg2yHK2Hnag
1367
+ Tm1RbCh9YeA
1368
+ Tq-AsJ8M5yw
1369
+ Trq6vcUOeQE
1370
+ Ts04-23URYA
1371
+ Tu22Y0kIzJ0
1372
+ TuoGVwBkTEA
1373
+ TxC0dIBPzZg
1374
+ TzzhAQLRwT8
1375
+ U-kFZbOf6Nk
1376
+ U1VkpKvSn5w
1377
+ U2LvmqmOEZI
1378
+ U2eqvs_MZGg
1379
+ U4auzU8E2ms
1380
+ U5Ze75nn72M
1381
+ UA-U6m9O5OI
1382
+ UBVzrTJEbS0
1383
+ UDvCqeXCI-o
1384
+ UFXt6O5cxjw
1385
+ UGRFE3vTcRA
1386
+ UIV2gt1Jzno
1387
+ UIr0uGUWN6g
1388
+ UJeKNl461d8
1389
+ UJjixKei0ag
1390
+ UKBlZt_JL-I
1391
+ UKc54igtdXI
1392
+ UL42vZliknk
1393
+ UOGLLo60dzI
1394
+ UPRvrLsqN8U
1395
+ UPU_yi9Nv3M
1396
+ UQO5Qcl8aAk
1397
+ URC125wpMS4
1398
+ UT7n0WY_6Ww
1399
+ UTYuVdJskY0
1400
+ UUT_nhIlR_U
1401
+ UUoBfrbl1Cs
1402
+ UV1luTyPOiY
1403
+ UVTsXdLyIsk
1404
+ UVaN02jNqgM
1405
+ UXEr-xcC46E
1406
+ UXRKQjlYUZU
1407
+ UXZm1KDE_qA
1408
+ UbyxFZSZZ90
1409
+ UcwU4ghl9Gg
1410
+ UdpTELIW7bU
1411
+ UeMQ_al9lDQ
1412
+ UeO44STvnJw
1413
+ UjAxgnvxIJE
1414
+ UlKZ83REIkA
1415
+ Ul_ZfzfHRek
1416
+ UlsgLGCVKks
1417
+ UmA3FpFowh4
1418
+ UoVJllDh6rg
1419
+ UpScMViXT1s
1420
+ Ux-ExovbpsE
1421
+ V1frWTXGVN8
1422
+ V2DvDDzrFqI
1423
+ V2XB2l3aFvc
1424
+ V2x33UdHq4w
1425
+ V3nPA6doMBM
1426
+ V5FsNyk9rDo
1427
+ V5blsv5pn60
1428
+ V5uV1vR2M6g
1429
+ V6egGoCrbIo
1430
+ V7uG1tWVqHM
1431
+ VC7lRZTxDng
1432
+ VDLUQlOR_nI
1433
+ VF0hNn-Yfn0
1434
+ VFJsMQnqZZ4
1435
+ VFeOyT16oEI
1436
+ VFf_xowsMcQ
1437
+ VGOslZT-f-I
1438
+ VJjiUcZCkzw
1439
+ VKfVhH35RL4
1440
+ VKnNVP23toY
1441
+ VNtYIBI3BFc
1442
+ VOFQb8sVnxs
1443
+ VOomWcgrHis
1444
+ VRu86oG1hVY
1445
+ VSRuncwwJyQ
1446
+ VTsKS3ccd5M
1447
+ VVV3XeAevc8
1448
+ VWc3ezusNTg
1449
+ VWrYbF797LI
1450
+ VYE91m6Rli4
1451
+ VYKXayozHFU
1452
+ VZ0-gccLaNU
1453
+ VZki-LyHI0E
1454
+ V_lZ61OB0EI
1455
+ VcLWubfmJcM
1456
+ VcYUWSBo4i4
1457
+ Vcvl5piGlYg
1458
+ VePpQBCbKBw
1459
+ VfLSoyinXmo
1460
+ VfmcO0gQ8B4
1461
+ VfxI-6_LL54
1462
+ VgrW-fB3EXI
1463
+ Vm2mKdh_VUc
1464
+ VmPl5QClxYk
1465
+ VpKnSOS6NGo
1466
+ Vpw09qKQal0
1467
+ VsOismDNYjE
1468
+ VvXMMtldJU0
1469
+ Vw7hC4jpglg
1470
+ Vwxft_tqrcg
1471
+ Vy6PKQy2OI0
1472
+ VyJCLVAj-vE
1473
+ VzHAbg24fRs
1474
+ W-7t8Zho4AI
1475
+ W-_X1HZM7ys
1476
+ W0Mw5dfZoyg
1477
+ W0caVAMB7LQ
1478
+ W1tOTi9L7Hs
1479
+ W354uz6KPJE
1480
+ W3H5b9yZcOs
1481
+ W3XADagE6P8
1482
+ W42cARRDRe4
1483
+ W4PKfrQ2J5Q
1484
+ W8mQ41XaZvo
1485
+ W9okU7bAE-U
1486
+ WA1L8vXkSKQ
1487
+ WA9sMdQzdiA
1488
+ WAl7wIw5ReQ
1489
+ WBZ3das2pd0
1490
+ WDialA8RHEg
1491
+ WIiuU7Fd-KA
1492
+ WJBgKbe1ZyU
1493
+ WJFdyqLo-pM
1494
+ WJaW32ZTyKE
1495
+ WJlK8D9Vy1k
1496
+ WL5IJ4JVjrg
1497
+ WLVKRBitiY4
1498
+ WMl7MSsJMGI
1499
+ WP4jOqfpJx8
1500
+ WPIIgluc2vA
1501
+ WQ2QwrfZHTk
1502
+ WQ4W-UqaaMo
1503
+ WQXM3hU-7vk
1504
+ WQiGCGxYMm4
1505
+ WR4vkITJckY
1506
+ WT2aDvMsZF8
1507
+ WTxTntSi6Jw
1508
+ WUx-OqSLBak
1509
+ WUzV_mDcBmk
1510
+ WXKNBguF0a0
1511
+ WZQi3LOqiNc
1512
+ WZw5Vz_imPM
1513
+ WcLq2oABDhA
1514
+ WeHTlU8efMA
1515
+ WgWkOiDD4n4
1516
+ WiILKjGKveI
1517
+ WiNgIQ1pOlE
1518
+ Wj6MkLWm8dQ
1519
+ Wmt5FzPTuXE
1520
+ WnQCWE22AYs
1521
+ Wq5ZIu0UZmE
1522
+ Wqb2ZPi6KyI
1523
+ Wrb8tfqYPzw
1524
+ WsQ7ysVt-0A
1525
+ WssviVcyncg
1526
+ Wx9v_J34Fyo
1527
+ Wxp1QG7GIBs
1528
+ WysUzNnT4i0
1529
+ Wz6DxjLZSbc
1530
+ WzyI1gx_PHI
1531
+ X-fZ5fMfAv4
1532
+ X-irPLY6oLk
1533
+ X2ac5fHYZ3I
1534
+ X3bcvqrTTJI
1535
+ X3tI7SRuvhw
1536
+ X3ztiC2hUXM
1537
+ X7Jd6mI9iHM
1538
+ X7WI5_UUpys
1539
+ XAJMmm6sLQs
1540
+ XAhljQgieEs
1541
+ XAlAgC7rYug
1542
+ XBF-Pd_asag
1543
+ XEHxXWpF6qI
1544
+ XIXA-Kqb_Os
1545
+ XKCWJuWup08
1546
+ XL6m6Zl2ejc
1547
+ XLSJ7NIldNo
1548
+ XPgdPEYr648
1549
+ XPwYTyTbCag
1550
+ XQy5b5mlVZ4
1551
+ XUxgC9KYoPE
1552
+ XVtVVuER3W4
1553
+ XXKVwqZS1R8
1554
+ XXgJZXUPZXM
1555
+ XZ70Li8v68o
1556
+ XZxl6h-TxQ4
1557
+ XaGss5gNQEM
1558
+ XabAlfY1TyQ
1559
+ XaxSxkApUQc
1560
+ Xb32P_VWh7w
1561
+ XbDyn-2_xIc
1562
+ XbNcsgEX2jc
1563
+ XbtdjM8wGv4
1564
+ XcZaQG7wUpA
1565
+ Xdlo2HW5jwk
1566
+ Xf6xJiXV3AM
1567
+ XfMoY5WSlvc
1568
+ XkNofrvAt2s
1569
+ XkX-zJa_9Wo
1570
+ XlQzQ55BA0c
1571
+ XlYXYqwEeJw
1572
+ XoVZxH4CwMQ
1573
+ Xq-YIJA61W4
1574
+ XrGuIdJ0Ckg
1575
+ XvfpyrdeZ0s
1576
+ Xw2PEuhphPk
1577
+ Xwu5PSZfocc
1578
+ XxqpGCkrF8g
1579
+ XyvlWUQAkxM
1580
+ Y05wSSdslyI
1581
+ Y1QnypwCPZo
1582
+ Y2zmCgmDVqk
1583
+ Y3sbt-6ndq0
1584
+ Y65tbaBs88M
1585
+ Y6Mn-d9QMxY
1586
+ Y9anPwwkuU4
1587
+ Y9rkKtK1b44
1588
+ YC3Hhxqbof0
1589
+ YECIYrmXH0o
1590
+ YEgE-m8kzpM
1591
+ YGdNSqa_clI
1592
+ YGwLb86Uso8
1593
+ YIDhGzp8Z2s
1594
+ YJkS7QqeYPw
1595
+ YMoa5JpjEtM
1596
+ YMqKHYZHQD0
1597
+ YO-oamNqsA4
1598
+ YOD6jIFD5aw
1599
+ YP_35HEWcfc
1600
+ YUJa7_i3Cn8
1601
+ YUxb5mw96eQ
1602
+ YWpX7tlXLB0
1603
+ YXPEXwjYpGA
1604
+ YXfpaJVsVa4
1605
+ YYTcoNCWy8c
1606
+ YZBLFRe-G0A
1607
+ YZLGkfy0_oM
1608
+ YZME4lTWBHY
1609
+ YZp4fNMECVg
1610
+ YcGOV1iUBlE
1611
+ YeIn1rFbTuw
1612
+ Yex9CPZzezk
1613
+ Yf_jdF01azE
1614
+ YgCBzXR63l8
1615
+ YhoYie6_la0
1616
+ YiPEZWaxSXs
1617
+ YjTCns1fFmM
1618
+ YjwTPflZm70
1619
+ YlIl427ZdHc
1620
+ YnRgf2UNBXA
1621
+ Yo-CEXgHwkk
1622
+ YpmKbkU-X_U
1623
+ YtiVmh7UKOw
1624
+ YuLSfLZ9apM
1625
+ Yui2Msy8X1E
1626
+ YvRRXziXpMY
1627
+ YwqTTdUEJGc
1628
+ Z12xymgfH9k
1629
+ Z3r9tnbK_iU
1630
+ Z5SpCVtoOdw
1631
+ Z75PmkL_UaQ
1632
+ Z8iLMnTX6OQ
1633
+ ZAX4LKUeAVE
1634
+ ZAYK9cMiSbE
1635
+ ZBcOyv8LZ8s
1636
+ ZDfRJiIODMs
1637
+ ZDv9njERj0s
1638
+ ZEHCzjk0Hrk
1639
+ ZFnr_mTTL0E
1640
+ ZGzQUBDGd-g
1641
+ ZH6357PHRCc
1642
+ ZHLsDRxEMGs
1643
+ ZPHfpA_4uhY
1644
+ ZQPrD6cbqHQ
1645
+ ZQQwgYaQh1w
1646
+ ZSEV1ivNUWY
1647
+ ZSTo8stxfG0
1648
+ ZUlkRGgSLfI
1649
+ ZXMx5C9oFk8
1650
+ ZYSjPZUqLdk
1651
+ ZZIjgZYnP6Q
1652
+ Z_TNQsWm8SM
1653
+ ZcO5PLLXLy0
1654
+ Zg4zFxyeLMQ
1655
+ Zi2RoikofDs
1656
+ Zj7U7R-2fcs
1657
+ ZjmynEwugnk
1658
+ Zn8iU3-RNL0
1659
+ ZnpqfpPzQAE
1660
+ Zot-rd9KoYE
1661
+ ZoyUo4ZY-70
1662
+ ZpCK7G0LQAc
1663
+ ZpNQS6CX7FA
1664
+ ZpbEtTNDQBM
1665
+ ZptziXSxpx0
1666
+ ZsMJ8wO1YKs
1667
+ ZsY6kAHzdwM
1668
+ ZtXr7bckLyc
1669
+ ZvAaTKbPzH8
1670
+ ZyOqaUFxojc
1671
+ ZyUCcfMcmyE
1672
+ ZzVhwsetzDk
1673
+ _1B60L4m60M
1674
+ _4bzRoRn260
1675
+ _4r4LIX84TM
1676
+ _5O0_4kkKSg
1677
+ _5r3yqpaNuM
1678
+ _6AK5vp6nDE
1679
+ _6ck3EDlssw
1680
+ _7RMd0g9FtQ
1681
+ _A-pZH-S4jk
1682
+ _AvNT3vyzr0
1683
+ _BxhdmIGNys
1684
+ _CKFycGHzLo
1685
+ _F6bq0l18Ng
1686
+ _FcVS3nf3Qs
1687
+ _H71S7ar21o
1688
+ _ITluulB74Q
1689
+ _K-JEM9RNeA
1690
+ _KIjLRuf4NQ
1691
+ _LR1NtZOISg
1692
+ _OhqC6w4C28
1693
+ _QRahNSxQfc
1694
+ _SYvYBxt_Dg
1695
+ _UVuXcclWM8
1696
+ _WN-6t58HdM
1697
+ _Xo-Cy_okCE
1698
+ _ZZVgjbePRI
1699
+ _ZdfwflsC3U
1700
+ _aOcddJCrSQ
1701
+ _ak7BooJlnk
1702
+ _bBrj6QBPW0
1703
+ _cg2nSAfeUY
1704
+ _czQyx6JYgc
1705
+ _f2hDpHsQlg
1706
+ _gJN7I0a9XU
1707
+ _gR3p72XEMg
1708
+ _gm8Jp_4a5E
1709
+ _hE7CQmHPNI
1710
+ _hYhPQTHOMg
1711
+ _kUyGB9O_Hs
1712
+ _nwF3zYpfQ4
1713
+ _piu_0WXnOA
1714
+ _rWxLXmPdc0
1715
+ _sWO1yvsmmo
1716
+ _t61OZ5dN9Q
1717
+ _uV1LuL6yDQ
1718
+ _y9aHEd08LM
1719
+ _yKGZu_8gyI
1720
+ _zYWKwpARf0
1721
+ a3Ah-REb5vc
1722
+ a3mNe6zQS7Y
1723
+ a5CAVvzL2GA
1724
+ a5lUNW9wlpY
1725
+ a8-ySFmij_I
1726
+ aA1H4iLap44
1727
+ aB0Ku9J6jHg
1728
+ aDURTM8MwIE
1729
+ aGPO0OB2_Ak
1730
+ aJAQVletzdY
1731
+ aLPYGw9HubM
1732
+ aN5gtrkwgb4
1733
+ aQM3Hbvospw
1734
+ aSDRWUh7nqQ
1735
+ aSF8LfYNlSM
1736
+ aSp8Pf6zXvg
1737
+ aSuaW_0lLYY
1738
+ aTigt0mmyiM
1739
+ aValNp9ADko
1740
+ aVstEGR5jt0
1741
+ aVtOmrULE2w
1742
+ aZn-H9uoYk8
1743
+ abAitsu2-iA
1744
+ acWlXpQu83s
1745
+ ad7p5nSZbBg
1746
+ adXUEwBrrl0
1747
+ ae527aiePIg
1748
+ afqjRWOtCzM
1749
+ ajI8zq2SedY
1750
+ aluYFRd96Kk
1751
+ aqz-KE-bpKQ
1752
+ asFNxO3cXMY
1753
+ asyKEIv1pZw
1754
+ atd0dgRkl2k
1755
+ au5oFT_zkYU
1756
+ avITb-TZWOQ
1757
+ ays4JisCVAw
1758
+ azPa3uccg2I
1759
+ b-lZA6gHWg8
1760
+ b0dX-hS1mUY
1761
+ b3wy7s_QvOY
1762
+ b44IhiCuNw4
1763
+ b4jlwhPxTpo
1764
+ b552H9Gclo0
1765
+ b6DszEFrkSQ
1766
+ bBpQgQ416dY
1767
+ bCbKYU8D2QA
1768
+ bCqq2y9Ge-A
1769
+ bDjCnFvz11A
1770
+ bHeksIj7dpM
1771
+ bI0kawdOOXs
1772
+ bKqwRavOIVY
1773
+ bL2qA6Gt6Ko
1774
+ bMtTAQHLc6A
1775
+ bPnX7JwHQEg
1776
+ bQ5gacQY7y8
1777
+ bRh_KJ6yyGU
1778
+ bSySiLIcnzs
1779
+ bUAg8d3aQZU
1780
+ bWh4bNRqaK0
1781
+ b_PbouAwYIU
1782
+ babTVMWIosM
1783
+ bc15oBT5Nec
1784
+ be0sJh-pxOg
1785
+ bebrJ-o4d2E
1786
+ bg6f32E9vN0
1787
+ bhMSZQLSp2M
1788
+ bi3iqJykwEo
1789
+ bj77Ljq7kbA
1790
+ bjNvrJwE9Dc
1791
+ bk4b1P_IHZ8
1792
+ bkUDUnE6aP8
1793
+ bkZBYQ7mnrQ
1794
+ blCUScrxzdo
1795
+ bn5OSGM8oKc
1796
+ bqfA6lRrlaU
1797
+ bqfAc6X_uUY
1798
+ bsNztQbe4Ic
1799
+ bugT5lGfl9M
1800
+ bvCbqmHHQD8
1801
+ bzDZY35Vacw
1802
+ c0IrO0Rnlls
1803
+ c0XSC8b_9hU
1804
+ c1Fjrbt7peU
1805
+ c1H__IGGs8E
1806
+ c30s6OnCApw
1807
+ c41Mm_I_pBc
1808
+ c62KFa9jY8E
1809
+ c6gVQ2e1zEs
1810
+ c7baTdyHv8g
1811
+ cBXqfQzrsEY
1812
+ cCLOtOW3mIk
1813
+ cDJ_DVILBuk
1814
+ cFhONVldyE0
1815
+ cG_MFoGh_EM
1816
+ cH3DOOXFE24
1817
+ cIpXNRmZtVc
1818
+ cLMUQZr0TOg
1819
+ cLVQSb0l2Pw
1820
+ cLcmBWC4FBI
1821
+ cLofgTNATIQ
1822
+ cOtoozzRy8g
1823
+ cRiOZTv0dzs
1824
+ cSEqa8cLqss
1825
+ cSp0k6jEvY4
1826
+ cTFi0WEdtvo
1827
+ cUkZLttONGM
1828
+ cVezZhj7sC0
1829
+ cWZD_1CxIJw
1830
+ cWdvVQ7hIa8
1831
+ cXpb2qzMlJ8
1832
+ c_Ok8VutSbw
1833
+ caSPzISFhiM
1834
+ ca_kO_J5rWg
1835
+ cafMvJshI1c
1836
+ cbqshch5yAA
1837
+ ciV5Vi-3Adw
1838
+ cmA7Ze4KPd8
1839
+ cngWph0hiVo
1840
+ co-tsFhaeRQ
1841
+ coswCgQ_Zwg
1842
+ cpAUXdNPnQs
1843
+ crJ3KpoMxdk
1844
+ cvlLKmzsEBc
1845
+ cy9rx19dujU
1846
+ d0am7-lXaus
1847
+ d0npGPi3UPw
1848
+ d2b921R6Q7U
1849
+ d4gLpU_fqmE
1850
+ d9ShdxJRArw
1851
+ dBHgd6MNyyE
1852
+ dCmfHeROKIE
1853
+ dFxMzUUWLeM
1854
+ dGwu92AU4TU
1855
+ dHx4lFPqPiI
1856
+ dIFsZ9uTrpE
1857
+ dKfNJ2umK4o
1858
+ dMN2CeulM6o
1859
+ dMjan7yZ4gU
1860
+ dMxd2IrTF7w
1861
+ dMxrAgNbTRk
1862
+ dMyTRV4x50w
1863
+ dP4U1yI1WZ0
1864
+ dR4CGlYvT7g
1865
+ dSfIxFW_X1c
1866
+ dUKtnNKWV44
1867
+ dVeMgCpj-4c
1868
+ dZCCdLDE8gI
1869
+ d_CTrbVqWW0
1870
+ d_oCEre1NJ0
1871
+ daIt5KbxFNY
1872
+ daTulcsI2RU
1873
+ dbkgiCgapGc
1874
+ dd03Lj8KOjA
1875
+ dd9EimkKEZw
1876
+ dk2FVz_B5ls
1877
+ dnQnF9apUF8
1878
+ dnYUpixA8m0
1879
+ do2lClbRBmA
1880
+ dpHBJkblp2Y
1881
+ dt1MS1YmxyY
1882
+ dt1XiYVENuk
1883
+ dt9To7mMjl0
1884
+ dt9tqCG8neM
1885
+ dw6X5-D3tmY
1886
+ dwd_Z917XGI
1887
+ dxWIZ0ODlZE
1888
+ e0Q9GzQzBhE
1889
+ e1kPOlccTbM
1890
+ e3rjGvtVXeo
1891
+ e6MyukRaKOc
1892
+ e7jMlBiYy98
1893
+ e7z4uzyeRzA
1894
+ e82tT1ge9Ak
1895
+ e89XopekEJI
1896
+ eAQBVytibCc
1897
+ eCKD8fP2aao
1898
+ eDGFEV4CtB0
1899
+ eF7TdzaX_Jc
1900
+ eFUL4yP0vqo
1901
+ eFdjgis1E5I
1902
+ eGYkD0e411I
1903
+ eILksASDf9w
1904
+ eIXNlBbw54g
1905
+ eKl16S8gUXk
1906
+ eLX1KG3FnBg
1907
+ eNtpsXBrhk4
1908
+ eO8hhdMv7P4
1909
+ eOJ7XVQlnB8
1910
+ eObAEb5SBmg
1911
+ eQY39NuSXHU
1912
+ eQcmzGIKrzg
1913
+ eQjsCvqh1_Y
1914
+ eRU1XIwXCAE
1915
+ eRvOQ8MsyV0
1916
+ eSR9swuM61I
1917
+ eT5IGtWmQ-M
1918
+ eVXJaSgEf74
1919
+ eVz41Qm-5aU
1920
+ eX3PtJN52g8
1921
+ eXeWNtyjgPk
1922
+ eY5c3ce48Rs
1923
+ e_7T20B65hQ
1924
+ edRk4JmnLkM
1925
+ efIVRw-eVzU
1926
+ efooj84xalQ
1927
+ eg0bYuuHzkI
1928
+ ehMbGtUiYh4
1929
+ ehoNb1i6vY0
1930
+ ejrSSx2FMhQ
1931
+ eke0xp4rprM
1932
+ ekxjCWMq8Z0
1933
+ emqCYr2xVGw
1934
+ eoHw0W_CzX8
1935
+ eoN2Xrs6cxc
1936
+ eoWIjARiH6Y
1937
+ eowka-cMoxM
1938
+ epfVelQrqX8
1939
+ eppNMltmm-o
1940
+ err7vaMQ4z4
1941
+ esVois0s3qg
1942
+ esnMNkFAG1U
1943
+ estUu_XdB7k
1944
+ etnFlNJ-uys
1945
+ eub0JuAMSiI
1946
+ evORVtwC0zk
1947
+ evnEcyr9dIw
1948
+ ewQ3FI_u5nY
1949
+ exsaT4HrbhA
1950
+ exvJz3JPHuI
1951
+ eyBEIx7fCfY
1952
+ ezQjNJUSraY
1953
+ ezxm9w77JKI
1954
+ f5kTVt7QnK4
1955
+ f5mk7tpuEs4
1956
+ f5ud_4pbXPk
1957
+ f7HNmm8MHMI
1958
+ f9oet4cvYf4
1959
+ fAa51WqtOT0
1960
+ fAtWwadP6CY
1961
+ fDZ8fFOesDQ
1962
+ fJrNkspUMno
1963
+ fKHcxXH3f6k
1964
+ fLQ-pj8lteY
1965
+ fLc6CPIre_4
1966
+ fO1dl3Kzz40
1967
+ fREa-rfs8aU
1968
+ fSSzNXPVvYc
1969
+ fUQ-OTqjp9c
1970
+ fUzh2Hu4JoY
1971
+ fV1xRX3XGKk
1972
+ fWnDlFMfOA0
1973
+ fX_sxgqAKkg
1974
+ fXuqsnqsfGE
1975
+ fYHqUK0tqH8
1976
+ fZ8S5N2akUE
1977
+ f_LHuvCZLf0
1978
+ fb8d5RZqzA0
1979
+ fceagC5I1Yk
1980
+ fdANKfoxm6s
1981
+ ferxnIAT2Go
1982
+ ffVL9wvWfgs
1983
+ fgIfH_85nBw
1984
+ fgzKbL4HjCs
1985
+ fhXPsdrWJiA
1986
+ fhrwGiZrvio
1987
+ fjW_WL3KLUg
1988
+ fk3Cq0mR6_4
1989
+ fkJt1i4aO_8
1990
+ fkYqJFYFYXI
1991
+ fk_RkZortw8
1992
+ fnArfARx9wI
1993
+ fnW2Qy7uY9Y
1994
+ fo65xXZqfoA
1995
+ fsUkET-XhzM
1996
+ fu1ZVnk2Gqk
1997
+ fuRU10ocuds
1998
+ fuZ88PJ7Lsg
1999
+ fvtbdq3WiyU
2000
+ fxIAaegID9M
2001
+ fyFOwJRr3I0
2002
+ g0UDwv1vPgw
2003
+ g0cy2szFR4Y
2004
+ g3pERE3VsX8
2005
+ g4zf6FeVoxM
2006
+ g8DbS9Ochow
2007
+ gAjMBetApiA
2008
+ gB-ZlM-aUTE
2009
+ gB6vLAjfMdc
2010
+ gE9soy7r-Ms
2011
+ gFZ8NPmkPpg
2012
+ gFZj_2voVyA
2013
+ gGZq_aR_jyg
2014
+ gMZp4NeJq5w
2015
+ gOprZJ2CbR0
2016
+ gRO6JH35Cdc
2017
+ gROnZPk4dnk
2018
+ gTDprJn164c
2019
+ gTPaMD7qhek
2020
+ gTmG636eAYY
2021
+ gTzhAelL1go
2022
+ gV7XWdt72Vo
2023
+ gV8icTgcuyU
2024
+ gX0AEmOrKsY
2025
+ gX6r9BWwCOI
2026
+ gZyG2_WKQvc
2027
+ gaUt3gTwwzU
2028
+ gb13T2EYzvM
2029
+ gcsL5WUXoYU
2030
+ gdb3IbS2SGQ
2031
+ gksr08xM7dc
2032
+ gmKD-PSkATI
2033
+ gn_UhhjygOk
2034
+ gqxjTyxDC-Q
2035
+ gs2r0YmX7rk
2036
+ gvYPX1ja9gU
2037
+ gwysz5CtZLg
2038
+ gxYTI3KmFk0
2039
+ gzfKeagBy1k
2040
+ h-h1ceF9ToI
2041
+ h0Jgoc3tosQ
2042
+ h16CeeTSV2Y
2043
+ h3sdAzf02O8
2044
+ h5ZU5xukqyE
2045
+ h7HDtT_cOs4
2046
+ hB5cFvlTYWk
2047
+ hCc4qd-6h_Y
2048
+ hFj2tLk_poo
2049
+ hHHYqKqySy8
2050
+ hIkJ1Q69eJI
2051
+ hK4VPzJYU94
2052
+ hMmYNZgItKc
2053
+ hN4A9M4qXy8
2054
+ hNhlfyoVlLQ
2055
+ hO4aTFDeokA
2056
+ hOloWe3oYR4
2057
+ hPUs3kB39sA
2058
+ hPhJlvpNz2Y
2059
+ hPt5JG9kGUc
2060
+ hQyEUb5DS2o
2061
+ hQzC6_3S6xg
2062
+ hS-re7JJ8ts
2063
+ hS2fdP1bNV0
2064
+ hSUDE3-e5_0
2065
+ hUKXRN4RzpA
2066
+ hXtL9Oe2OTI
2067
+ halJEOj2RP4
2068
+ hasaLmOLJ8k
2069
+ hcl4vYMfvYQ
2070
+ hfMHcdPo7fk
2071
+ hihIrNRfnpQ
2072
+ hnAqGpkvYIM
2073
+ hoAGRdA2Zpo
2074
+ hpK6FZbxjgs
2075
+ hq-1BIaFjGc
2076
+ hqbW-EaOcUY
2077
+ hr5Px8dD_iw
2078
+ hrb-HL0UUpM
2079
+ hrhBvOz8ius
2080
+ hsfgj1OWX0Q
2081
+ htCPtKDBmKw
2082
+ htZAga_nSW8
2083
+ hv4NNsPaL44
2084
+ hw0VaaAhfjM
2085
+ hx0rraflBpg
2086
+ hxHjQZSANwI
2087
+ hxa7Kd8P-oM
2088
+ hykLNP-Cpsc
2089
+ i-vmCEgFW5g
2090
+ i0CP1dmr4ng
2091
+ i1-9WDuXJGY
2092
+ i17JKd_eCAU
2093
+ i1srQA7ocZs
2094
+ i2l72jO-otU
2095
+ i3DQ9UOs8nY
2096
+ i4jX18-l9pI
2097
+ i4oLqHRxoZ8
2098
+ i5HYNnHtcdY
2099
+ i5z06C9Mi00
2100
+ i9WbGqPeY8k
2101
+ iAWKPOyYdXo
2102
+ iCPxwkaCXLk
2103
+ iCd5W4gwJsI
2104
+ iDJVid5u0Mo
2105
+ iDWMIMCMNiQ
2106
+ iFJ2pJph-Do
2107
+ iIw0Hfm9h7E
2108
+ iJJxd-XGQMo
2109
+ iKAYJ-makHU
2110
+ iN0gsOIWQgw
2111
+ iNTSOGM5rCY
2112
+ iNhHBJkMz58
2113
+ iPDuJS2QUrc
2114
+ iSYvDl5iBFQ
2115
+ iT5-jVpP_TQ
2116
+ iTk4dutvfsE
2117
+ iVBD4cmI3_E
2118
+ iVbefrog1uI
2119
+ iVu7DNjFgWk
2120
+ iWRrLD7H98s
2121
+ iX1P6XAvtlM
2122
+ iXUwLs4kNvc
2123
+ iY2XmAG6Rl0
2124
+ i_QZJaPorpU
2125
+ ib2KHcsUS18
2126
+ id1m2sLzwrE
2127
+ id8Uq_339xM
2128
+ idAzpl13_sc
2129
+ ihpS0LTtWjw
2130
+ ijPyu-jUJ5Y
2131
+ ijlyytCZ_RI
2132
+ ikDYpq4uESA
2133
+ io3xwOBD4f0
2134
+ ioFH4kyF0kY
2135
+ ip0yFdCqbkY
2136
+ iqm-XEqpayc
2137
+ iqmkldsIxkI
2138
+ irKLUxa7jkA
2139
+ it0EYBBl5LI
2140
+ iuqe74uT7fw
2141
+ iy-uKpGZmrQ
2142
+ j-0L20FH2Zc
2143
+ j-nCvuPZoWo
2144
+ j2WVRXdVu_4
2145
+ j2k6EQOoRzU
2146
+ j3BVnXHPjyw
2147
+ j6FjjenQyko
2148
+ j7LhySzyh7k
2149
+ j7XDYxypFb0
2150
+ j921XOqiQ7o
2151
+ j9Q2J2MDctA
2152
+ jA4WY877mTk
2153
+ jBox8_bAoTM
2154
+ jHVnt6VROM8
2155
+ jLLVBH5ZZG0
2156
+ jLRyB9Hz7U4
2157
+ jNyPJ1LQmiA
2158
+ jOj_NSDXMz0
2159
+ jPxamG3f5TE
2160
+ jTaFavHwbiI
2161
+ jXqb7dbUBHs
2162
+ jYEIkpTjpwI
2163
+ jZa3FK_T_Z4
2164
+ j_k1TZLK2mQ
2165
+ jbzpyRmU3YA
2166
+ jcFgCdlsPjI
2167
+ jec9rBFqcwc
2168
+ jfcrY85C_-k
2169
+ jgT5X37XW3w
2170
+ jhUnM7jekNo
2171
+ jhaa9bIPuOY
2172
+ jpAig_5u60Y
2173
+ jv-dZw3ITJs
2174
+ jvRleA3lWBw
2175
+ jvr7UJI47UM
2176
+ jwBi_Q9ffys
2177
+ jwS75RtJJBc
2178
+ jzbqbu-0bxc
2179
+ k-pmfynqbko
2180
+ k5QJ8s3qUyA
2181
+ k6Hf0WGquNM
2182
+ k9Kb4Pwu3ok
2183
+ k9zYM3XqNps
2184
+ kBLs0CsEWEg
2185
+ kEQyvL6El-M
2186
+ kHBSmEfwZZs
2187
+ kLdPQfLv1g0
2188
+ kM8iZqP4GDk
2189
+ kPUqlB13Kn4
2190
+ kQLhEtSx0NA
2191
+ kYVd_HIkGw4
2192
+ kYXiegTXsEs
2193
+ kaBI3TvA7vA
2194
+ kdIG_ZsrdUs
2195
+ kdQPXCcV2K0
2196
+ kdo0b2eiXVI
2197
+ kfssQHnlHjM
2198
+ khm7702744s
2199
+ kl5eVK9kY_c
2200
+ klh8JFoo4Og
2201
+ kmTRmjeoKYA
2202
+ kp33ZprO0Ck
2203
+ kqFzRSwX3Fs
2204
+ kqsmCUo3xEQ
2205
+ kr5Ot5x8mmc
2206
+ kvOzjIRylsM
2207
+ kzgohxtf3hg
2208
+ l1wHRYkFhZg
2209
+ l29q7OxKMmM
2210
+ l5bkGI-CGFI
2211
+ l5hgKlBq2aE
2212
+ l5tn1sJ0fKg
2213
+ l6iQSCD9UeI
2214
+ l7OIqpr56NY
2215
+ l82uiWKCgF0
2216
+ lAzL_47Wkbc
2217
+ lB98olcaEm0
2218
+ lClz0KqwrFU
2219
+ lDJEC7OX6AE
2220
+ lHakoK0j9qA
2221
+ lIRpgkLK7r0
2222
+ lJYwN2X7IbI
2223
+ lK44daVVh2w
2224
+ lM1Zmq0Lcd4
2225
+ lOwZ3mwlenw
2226
+ lSGfxPD4nR0
2227
+ lZZgjJz-SEw
2228
+ ldiJbsXZgvg
2229
+ le3cBRlWSE8
2230
+ lfrBVdmeUE0
2231
+ lgWY-gjEYW0
2232
+ lh_ZUt-O34Y
2233
+ li7HdK2vIlw
2234
+ liKbDWr0RzI
2235
+ liVQZnSAv28
2236
+ llj7LzTULog
2237
+ losLXh9sDc8
2238
+ lpShGGv6iJI
2239
+ lqbSxAvUNpc
2240
+ ltPAsp71rmI
2241
+ lxBYJOEbU_g
2242
+ lxE9lqRrcuo
2243
+ lxQjwbUiM9w
2244
+ lz1RY_CNWeU
2245
+ lza-61vEfcw
2246
+ lzniYTLmFkc
2247
+ m03Bn3jgP48
2248
+ m0EwquC6wBU
2249
+ m0svbKxpLhI
2250
+ m0ttpozYW14
2251
+ m1lhGqNCZlA
2252
+ m3ZogsjvmLA
2253
+ m3q4bKFCkio
2254
+ m3q7itlxq14
2255
+ m4yEAVUxsFM
2256
+ m5_41I_BLc8
2257
+ m9qPsbp-l00
2258
+ m9y0Kt9UFYc
2259
+ mAUNqn8QAFU
2260
+ mCghaYzVDxw
2261
+ mD24h-bbdMU
2262
+ mEP7QCH2oYc
2263
+ mETDq4gwIkU
2264
+ mFRkTyvuamE
2265
+ mFu85lirRfw
2266
+ mLTM_vEz1jU
2267
+ mMiiyP2W-qQ
2268
+ mQu-g8K7qtc
2269
+ mQyL3LgJwXA
2270
+ mRzqtElhGsY
2271
+ mXBHUYIXQAU
2272
+ mXNd6zUVwP0
2273
+ m_D83abmA4Q
2274
+ mbKfbtQvUgQ
2275
+ mcZfJHqf4KY
2276
+ meka3_pUVqc
2277
+ mexPA0ocnnQ
2278
+ mf8vuUJoNaI
2279
+ mfuwTh61hMA
2280
+ mfzWMIEVrFk
2281
+ mg7CQGLjRzE
2282
+ mhZJxmbSSt4
2283
+ mhxEcl-85pM
2284
+ miGwjQa0txo
2285
+ mjI_IyzUye0
2286
+ mp_aq18zg38
2287
+ mqSucLmIFeg
2288
+ mtXfzd53wRQ
2289
+ mu1XN7ABANM
2290
+ mw4GQ5jhL6g
2291
+ myDLLgryIs8
2292
+ myGaZRw-0oU
2293
+ myaOG-5N12M
2294
+ n-EpKQ6xIJs
2295
+ n-Ptyqo4lE0
2296
+ n-mmxFdva4w
2297
+ n1WR4pPT1Jo
2298
+ n7YJE14tLLQ
2299
+ n7Z6KyTeouY
2300
+ n9Gl1hBgjIU
2301
+ nAyLvwIcnRc
2302
+ nHEr8-LtfUo
2303
+ nHU621TjCi0
2304
+ nISa7ahsQVY
2305
+ nMIE9IKTV6Y
2306
+ nN4fDhAcGTM
2307
+ nNgIi4eJduY
2308
+ nNuk96Rw2Ks
2309
+ nQeMM3bqM1M
2310
+ nQgkkH-o-aE
2311
+ nQmB8u7aBZs
2312
+ nQpuGwWyFQ0
2313
+ nSsQFG8QzcE
2314
+ nWo--dSu9bs
2315
+ nXcU8x_xK18
2316
+ nZQ_jnOVFeU
2317
+ n__HtO28X8M
2318
+ naFR4znnS_0
2319
+ ngdwHwnLCt8
2320
+ niKRw8zeYa8
2321
+ nihu7deKG9E
2322
+ nj-YK3JJCIU
2323
+ nkCgvEXj7FQ
2324
+ nky_VSE43Hs
2325
+ noYQhtNG-RA
2326
+ nor0P6jwoeg
2327
+ nsqOggafySQ
2328
+ nsrEmJ_19v4
2329
+ nu3p9JQ4ykM
2330
+ nujU66hnemE
2331
+ nvGGjCIIrm0
2332
+ nwyoo3oYYpE
2333
+ nxk8alj07sY
2334
+ nyes4M2CbtI
2335
+ nyxYzQVqo20
2336
+ nyyS0FSztKc
2337
+ o0lvhDX6DXc
2338
+ o1zo4BYFdtU
2339
+ o3z2CwnY_5M
2340
+ o4R1-TLkxBs
2341
+ o5q1ne_uNE0
2342
+ o5ufkJMr24c
2343
+ oBRZ8sD-OjE
2344
+ oCM304tbwcM
2345
+ oKC8ikzEFIA
2346
+ oMbvC_siQyc
2347
+ oNGKZrLIO0U
2348
+ oOdNHtF_s5o
2349
+ oRvkMz0FXtw
2350
+ oSBD9lej7oA
2351
+ oWVfSS5m5LY
2352
+ oWqLRywhozk
2353
+ oYKlQPSZr9A
2354
+ o_EvSqIz1EE
2355
+ oarLQY6VJzs
2356
+ oe45d8WFc20
2357
+ oe4VZ1YDL9Y
2358
+ oeFSoHWiLBI
2359
+ ofWA7ERRwzs
2360
+ ogOqpmvv6eU
2361
+ ol3tAxnNccY
2362
+ om1FNUWg4yo
2363
+ oo5gIrWn9tI
2364
+ oonc4u-Adbc
2365
+ oun9ZWMYYVQ
2366
+ owZYdzNJSUo
2367
+ owqMWjBPCW4
2368
+ oxQHiGgFQ2A
2369
+ oxsRS9fialY
2370
+ p-xcYP1ef6M
2371
+ p1lHlsF1LeM
2372
+ p2tdlkmZ8D4
2373
+ p4Q2mwsvlpQ
2374
+ p9ijo4bNOJI
2375
+ pBoDJqyONC4
2376
+ pD8z8Dior4A
2377
+ pD9cAOBZDj4
2378
+ pDySXCjwXJg
2379
+ pFXjD9J-JE0
2380
+ pHgSyR3qUas
2381
+ pI1feWHeUq4
2382
+ pIeCH_unCd4
2383
+ pJznoC1fZ_U
2384
+ pKu6GJdrKOM
2385
+ pL3u5ztU9kA
2386
+ pOBuggPY9_c
2387
+ pQ4C6LaLV48
2388
+ pQMiwYP8mq0
2389
+ pQcXM9HJXjs
2390
+ pRm187D8-MQ
2391
+ pSeYDrDjV48
2392
+ pUayltZ4PRk
2393
+ pXg1P_wX79Q
2394
+ pZ3OMf01xJs
2395
+ pb85Bw9sHY0
2396
+ pcOdWPkjUzY
2397
+ peYcx-xDiwg
2398
+ pfC0yuJtMc0
2399
+ phnXyHYku8k
2400
+ pidRKmq2iGI
2401
+ pk5WuA39qzc
2402
+ pkSnKgRHjU8
2403
+ pnmOhTbhKhc
2404
+ po-Dx2CgFGs
2405
+ pp71BH0UlvE
2406
+ ppPbjVv2XeQ
2407
+ pru-95YczT4
2408
+ psfAc33Ok94
2409
+ puDuigN0_I8
2410
+ puZZoKkv2Nk
2411
+ pujqknUWycM
2412
+ px-H1yOAQUo
2413
+ pzIQGk5nWDw
2414
+ pzev9CB3vuM
2415
+ q-abd-Tjf_g
2416
+ q0whherUIeo
2417
+ q1NTjsXYpK0
2418
+ q1SYOXJWXNg
2419
+ q2S5mFVGVec
2420
+ q3kU8MyAHhE
2421
+ q3zhx8M7mgI
2422
+ q5uELNh_Fro
2423
+ q6irtmJxniE
2424
+ q7Pfap5IRMc
2425
+ q7qZd-5PQec
2426
+ q8G-VO3gy5o
2427
+ q8PoT8bhElc
2428
+ qDzY7_qAP9E
2429
+ qEHexC4KaLA
2430
+ qGl8JMXmwL0
2431
+ qH8uMr3qb_M
2432
+ qKMd70Jr-ws
2433
+ qKnrUrYLlj0
2434
+ qLciglFWSBY
2435
+ qLxgp5VIx4A
2436
+ qLyCi_ARgfM
2437
+ qMoypa93-_M
2438
+ qMroyKN05zQ
2439
+ qOxbcVscVuQ
2440
+ qRiBE2DuGA4
2441
+ qTPgNaT-IyY
2442
+ qUbu1hTOKQQ
2443
+ qWd6VN9sseU
2444
+ qXBgNj7d2aI
2445
+ qYW_VBZoTsM
2446
+ qZdAl2dAL7k
2447
+ q_EJdzfnPSg
2448
+ qatNSkb_O3Y
2449
+ qav1y7G15JQ
2450
+ qb4Kx1MoxGQ
2451
+ qbaocv8MUJI
2452
+ qckQShckJI0
2453
+ qfKmOf3d0fc
2454
+ qj63Fyah8Jw
2455
+ qmwgjonregk
2456
+ qnQhGZNOIzE
2457
+ qnRbz82xzWk
2458
+ qnboyP15mi8
2459
+ qo_ZzjhbWLk
2460
+ qrmmPQC6o4I
2461
+ qsOjHdZtUM4
2462
+ qwMVYILJ7bc
2463
+ qxabzkWQ744
2464
+ r0i10UvjWBY
2465
+ r28QlaYxRDY
2466
+ r2REC5k-2AE
2467
+ r2vFxIWtQ-E
2468
+ r341ehyHft4
2469
+ r40wnEAEozQ
2470
+ r4yqeuWlJqM
2471
+ r59GRI81jQU
2472
+ r6xt8HZy1-k
2473
+ r7vzgexzXOk
2474
+ rA2g4mWai58
2475
+ rA4jmcefefQ
2476
+ rAnvXFlvV3M
2477
+ rEJBmcJlEy0
2478
+ rFEg1iwdHBM
2479
+ rFOs4b_WTYY
2480
+ rGBU08IrdrI
2481
+ rGqAwF16IOk
2482
+ rHZlSM-m_Xo
2483
+ rHgcNbOwnas
2484
+ rI8ccs3k0kE
2485
+ rJXB9VhK1eM
2486
+ rJc0Pf2B7Mg
2487
+ rKCT1mmxfb0
2488
+ rKvnvavLhKg
2489
+ rLUoN3LY6Fk
2490
+ rLnNaewOAbY
2491
+ rNOyRIU9WUk
2492
+ rNqiDGKZiZE
2493
+ rPa_Q-v7iPU
2494
+ rRexKBeW-LY
2495
+ rRsjDRqRvgA
2496
+ rS8oFARouuA
2497
+ rTgVxBHEKJY
2498
+ rWMEyWjaDug
2499
+ rZ5iulZ3rvk
2500
+ rc744Z9IjhY
2501
+ rcdFlgadpYQ
2502
+ rfzHohoFoZc
2503
+ rgj6AQ0KiXI
2504
+ rhFDM4YtAjM
2505
+ rizm_pzU7xo
2506
+ rjjFQ6onAJM
2507
+ rjlNd5leBDY
2508
+ rl4qZtDSLCs
2509
+ rl6K4-2q4JI
2510
+ rnzWJv_L13Y
2511
+ rpC49dRxPkM
2512
+ rqqgH8fMsOs
2513
+ rrzJQMo6QQ4
2514
+ ruAnF4N_8y4
2515
+ rw0AUfItOss
2516
+ ryJxN-ZRAGk
2517
+ rztwo4iUnqo
2518
+ s1QCigF6JOg
2519
+ s2Ya_pPoKQw
2520
+ s322l02OzWM
2521
+ s4b9dN-X32Q
2522
+ s5eZtpsX2Uw
2523
+ s6LlkCsQSq0
2524
+ s8i4K8MvQjg
2525
+ s9aNVQP9Bi8
2526
+ s9zkSyuL8eQ
2527
+ sCk_3RNWdZI
2528
+ sEnf_XAMuso
2529
+ sGvmS07G46I
2530
+ sIesHZHrHck
2531
+ sJIvQ5imN20
2532
+ sK-QPNTRj24
2533
+ sKFiaAcqTMY
2534
+ sKRcy61BFfM
2535
+ sNHDCXNLLlw
2536
+ sS7O_qMN_pM
2537
+ sSrSorPjUvU
2538
+ sTKAL3OfgSQ
2539
+ s_A2wZB7ZfA
2540
+ s_gSNpE1Z5Y
2541
+ sb7TL-252e0
2542
+ sboMcBqlShY
2543
+ sdeLg0on1Go
2544
+ sdnbXzi5yDQ
2545
+ sebF4SCrhgY
2546
+ sgGPHfZ6O00
2547
+ sgRTFPJ1MdE
2548
+ shkbyqqkkTA
2549
+ si4zS_Jx_uY
2550
+ skyd1JiJiYs
2551
+ slAdtDXiNgI
2552
+ sp4yAT08rqw
2553
+ sphx939ru4U
2554
+ spi7TCSQqns
2555
+ sqseuym5HKI
2556
+ ssHyZRRz6ek
2557
+ stqdcOSqbnI
2558
+ suK34prc56o
2559
+ swP3fNDUD0M
2560
+ sy-JNEKRe6w
2561
+ t-N3In2rLI4
2562
+ t1hyNFQq1Qc
2563
+ t1ok0e9gTRo
2564
+ t3SPY13b64M
2565
+ t43zNbKooJs
2566
+ t4xtn8b1Nk0
2567
+ t7caALtdVBM
2568
+ t7f-iQf3PRA
2569
+ t9nyOewqrU0
2570
+ tAbhaguKARw
2571
+ tBvd7OSDGgQ
2572
+ tC0vzGbKXWs
2573
+ tCskNV4zvIA
2574
+ tDNEWvgVQU8
2575
+ tE9_u7slTrc
2576
+ tFHxf3tCRr4
2577
+ tGQgcHMIq1g
2578
+ tHGLoklGaE0
2579
+ tIjr1ZMPEkc
2580
+ tMTZDgGaWqE
2581
+ tPX6j_22umA
2582
+ tQRaArBTpu0
2583
+ tRBf2g6XqCU
2584
+ tSUX6HReL6o
2585
+ tSf72G2vTU8
2586
+ tTyFNcuR8dw
2587
+ tUPzZFsH9NM
2588
+ tXwwOXxj3Wk
2589
+ tYS3l-lh39Q
2590
+ tZMO74qrrJk
2591
+ tZZyNqO9bQ0
2592
+ tZzyiJC7pqs
2593
+ teIlbfF2SE0
2594
+ tgKPxj4-0QE
2595
+ thhRZXvb0fE
2596
+ tj46eMPD8jw
2597
+ tj79bCk8Y7E
2598
+ tlPIi5sLOoI
2599
+ tlSl7JSQ35w
2600
+ tm50929wbaA
2601
+ tmAx6umELmU
2602
+ tnX_ibkICWk
2603
+ tng9I_Yfhg8
2604
+ trGNjNAxMMA
2605
+ tssS0XyUIi0
2606
+ tsxCu5Fom2A
2607
+ tu3g6qTyWiU
2608
+ tvsBIX44mQM
2609
+ tygH_zAKRvc
2610
+ u2gB0EGQxxA
2611
+ u5TYUHIGqzA
2612
+ u5eti_-WK9g
2613
+ u61kdTMLJTQ
2614
+ u7bLXr675gw
2615
+ u8B3lvaw9tw
2616
+ u9QPtZEhH3o
2617
+ uAWBQI7PVKA
2618
+ uAkCPrihEzQ
2619
+ uAsqM8rL8GA
2620
+ uB9UEkO77MU
2621
+ uCQMZfnM-a4
2622
+ uGUee74VHAc
2623
+ uHmQzfPWBG0
2624
+ uK0u0BHCF3s
2625
+ uKbvq15Pjw8
2626
+ uLycRSuHSAs
2627
+ uM9zA0QAvhA
2628
+ uOWT7-wqKS4
2629
+ uQ2I4dZh9O4
2630
+ uQEUqgEVQvU
2631
+ uQz8Y_jYzy0
2632
+ uSs1HeOShlY
2633
+ uTUY9W-eDZk
2634
+ uW6e50NYlWE
2635
+ uZEfW4QJot0
2636
+ ufnnbfJSZ3M
2637
+ ulMrIHCeLQU
2638
+ un5l34RRYR4
2639
+ uomUPcl17N8
2640
+ upw774ewNjg
2641
+ uqCecnqjL1g
2642
+ uqav8KWIBPM
2643
+ uvNun-YAzwk
2644
+ uwJtuNmBb90
2645
+ uxBzmTylbwg
2646
+ uxQbWbWVWug
2647
+ uxy82zQnQpo
2648
+ v-2yFMzxqwU
2649
+ v1DXBLkiqFM
2650
+ v1RFw8FpEJQ
2651
+ v1wFRiV9_v4
2652
+ v2DjHVhnjQI
2653
+ v2_u0yV5ua0
2654
+ v5vIyfkumj0
2655
+ v62wDFbK7iM
2656
+ v7-eoymQp3o
2657
+ v79U6LcewMc
2658
+ v7iWP-4rYg4
2659
+ v8R4spPNofA
2660
+ vC-uE8RAzbo
2661
+ vC9i_jB5h1k
2662
+ vKHi1XtBPQE
2663
+ vLb2GXb-bsE
2664
+ vM39qhXle4g
2665
+ vO87PpdQKV4
2666
+ vOFoltimf5o
2667
+ vQtLms02PFM
2668
+ vT4z4TCf4uk
2669
+ vTKx76zG23U
2670
+ vXPaPV2Ii-Y
2671
+ v_bumG-g0Dc
2672
+ vblot9RiULI
2673
+ vcL75ICzL0w
2674
+ vf4CrrsnVWs
2675
+ vhZqXOG9Sfo
2676
+ vi9kXfl4Nzo
2677
+ vjjNnix_rGQ
2678
+ vlddfDNoCS8
2679
+ vmGW-kulgy8
2680
+ vmhYHtGvqCI
2681
+ vmtEmebxSwo
2682
+ vpAyvSQdqvU
2683
+ vqqv18miTHs
2684
+ vsrK3zDsT4c
2685
+ vt6evh25rpY
2686
+ vtBXhiKZjWk
2687
+ vuHbp02YfKA
2688
+ vuThpe-Rgxs
2689
+ vueMpDhosM8
2690
+ vviufcserks
2691
+ vwuBZEo9eAE
2692
+ vyUiZfxuXXk
2693
+ vyWMhZ-NiHg
2694
+ vyrHk4_-epU
2695
+ w09mGrJy_h8
2696
+ w0ztlIAYTCU
2697
+ w4ZSe355UKY
2698
+ w4fzfZkZ2Ng
2699
+ w7zqBnrLxiw
2700
+ w9V5LKdTPRY
2701
+ w9rXlL5iJNk
2702
+ wAaHY_SXdgY
2703
+ wB1IOUjeiXU
2704
+ wB5lKhN4uoY
2705
+ wD-jLNmRVfw
2706
+ wDT446J2XME
2707
+ wF17z_DX7DA
2708
+ wFS3088FEUk
2709
+ wFkig7Vh8YU
2710
+ wHJB9u61btI
2711
+ wJKn7xElol8
2712
+ wKnbAj-2Hnw
2713
+ wLvCU_o362k
2714
+ wN-xfvQKBgc
2715
+ wNcTvkR4u8s
2716
+ wRQGTMig_P8
2717
+ wSDR6ymotHk
2718
+ wSO5y2Vq8Ss
2719
+ wTHo7j8Ruww
2720
+ wTYpjp9Bzrs
2721
+ wUnPxBv2h1E
2722
+ wV7TB-8PyhU
2723
+ wVnhS8BuE4U
2724
+ w_Ms7re3lYU
2725
+ waHHFLAet8A
2726
+ wbokl5PhXEg
2727
+ wfCZYvqVFKM
2728
+ whbyuy2nHBg
2729
+ wkYeYfLt8P4
2730
+ wm9QvbgryRg
2731
+ wmKPIm2HuXM
2732
+ wnH9X7Y8-fw
2733
+ wonfEPTNA0w
2734
+ wrtkQSU_Ekc
2735
+ wrtrwY2caSI
2736
+ wwBfqXf-1Xg
2737
+ wxNELCR-TMk
2738
+ wyM-7ra8uY4
2739
+ wz3TSWrZVjM
2740
+ x-YG6j9ZYsY
2741
+ x-ty9x6-P1o
2742
+ x1HMmnAI5sc
2743
+ x2Sh6TOWjkM
2744
+ x47V2e-4NmY
2745
+ x7jZG8cbqBI
2746
+ x8LIzP4vBrQ
2747
+ x8jdx-lf2Dw
2748
+ x99gUDpak34
2749
+ xCDQAlQfzNs
2750
+ xD1mRhx68eE
2751
+ xFFs9UgOAlE
2752
+ xGoCF5bk56A
2753
+ xITRYnvl0oc
2754
+ xIyfkI0jJMs
2755
+ xJUlD1q4bRw
2756
+ xL32w7DMwHM
2757
+ xLSzvPQpo5A
2758
+ xMhd22uQusw
2759
+ xSaGl8fiiYk
2760
+ xVuZZS93qtc
2761
+ xWrJD9-1o44
2762
+ xZ0Xdew--H4
2763
+ x_0F7lFtPRY
2764
+ xe21-4hVA20
2765
+ xfXkzZ62y8g
2766
+ xgpgHZ9iTMo
2767
+ xjwSr2qv1cY
2768
+ xkf9Gk1QuYo
2769
+ xmXiws6G8Mc
2770
+ xnLoToJVQH4
2771
+ xog_GEISGDE
2772
+ xokbf-np33Q
2773
+ xt7bRz9dX4A
2774
+ xuVTCXSW6WM
2775
+ xz7Ilex4McU
2776
+ y0jwPOra9Jg
2777
+ y2tCX54cDx4
2778
+ y5yxjABbdVQ
2779
+ y6Dh74INR_I
2780
+ y6Ge4RZsq4I
2781
+ y6zsW14YDBY
2782
+ y7bKFMO0d-w
2783
+ y9L5dA7pQxk
2784
+ yAe2kgW47G0
2785
+ yBEfb0XnMec
2786
+ yF8dBlsLn6k
2787
+ yHaY_d3QQR8
2788
+ yLkqI2UiZJU
2789
+ yMSj47Xs2_g
2790
+ yPqieOw41gc
2791
+ yRV2bb-tRMk
2792
+ yV6ZXGQDFfo
2793
+ yWJDUISGUp4
2794
+ yX5g1OHKE-E
2795
+ yXRXk5ubwW8
2796
+ yXhj9XnXpbI
2797
+ yXrJCyJBugg
2798
+ yY9QkbpDLaE
2799
+ yZ6vSn7PaPI
2800
+ y_-a5D-1_mE
2801
+ ya8cSGAiEyw
2802
+ yc0wPSjL8rc
2803
+ yc3l8Xsk4k4
2804
+ yeVl67bOTD4
2805
+ yew316-hSiM
2806
+ yg49p-i94rM
2807
+ yjD-_z6P75s
2808
+ yn_0j3hDnPI
2809
+ yqCnnSfXrRg
2810
+ yrqpVREbHfs
2811
+ ysCHqf1HfEQ
2812
+ ysogiWIjy10
2813
+ yu2iuSrpjHo
2814
+ yu6gBfUQFNI
2815
+ yuGOVGNkjAA
2816
+ yuNg5JugqrE
2817
+ yumyfeToWyg
2818
+ yvGUNb80FTI
2819
+ yvWV56LXTh8
2820
+ yvcUoZqSvbk
2821
+ yw--rqjGfI8
2822
+ ywljr9RKExQ
2823
+ yxPk1s6kjls
2824
+ yxuTqbqzzpM
2825
+ yz4H6fx15Uk
2826
+ z-KziTO_5so
2827
+ z2-YMvHXvkk
2828
+ z6kC2r9UrF8
2829
+ z83CZnYt72o
2830
+ z8X4DB-ecWM
2831
+ z8gNQpsYIco
2832
+ z93Kr9LmMCo
2833
+ zArkP0YCXw0
2834
+ zESeeaFDVSw
2835
+ zF-pekRLJ3M
2836
+ zF9VM-voMFQ
2837
+ zJIKjeOLOFw
2838
+ zJahR4BmskA
2839
+ zJsr4Tie-tk
2840
+ zJuuBn8mTP8
2841
+ zKkJB8Etq54
2842
+ zKwgR9IBovs
2843
+ zL_d7b1bsUE
2844
+ zLaE0SmZdMo
2845
+ zQpGf1gPY7M
2846
+ zUDEten_j9o
2847
+ zUkN2ILNflA
2848
+ zYYWeXjQn8M
2849
+ zZb8HVaO4Nc
2850
+ z_cAYz0Q5DI
2851
+ zb8B-vxmjTg
2852
+ zdIcTkGHFac
2853
+ zeO8n9byqg0
2854
+ zesVY6sbjoM
2855
+ zgs82z9t7Mw
2856
+ zhESYHHbzsc
2857
+ zhI__xPhoW4
2858
+ zhzlGFKXV2g
2859
+ zjnOeTL7Bz8
2860
+ zlHe6erNcQA
2861
+ zmn50KVgQak
2862
+ zotrHY6f1-4
2863
+ zpRuDsk_0vk
2864
+ zqFEtAUGItg
2865
+ zqVaXf-E8us
2866
+ zucFlVCiV-g
2867
+ zuk7TtRuR-U
2868
+ zwGcVuZCM4Y
2869
+ zxN1HYq4iEg
2870
+ zygTUJZ93dE
2871
+ zzQoGtjkKuU
2872
+ zzpYxy7lveg
TalkingHead-1KH/data_list/train_video_tubes.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86b9a6514dc4abdc9484c5fa93a3ad5db3c5ecb965181e92d16b9fd95ff8222a
3
+ size 29153463
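The training tube list above is committed as a Git LFS pointer rather than inline text: only the oid and byte size live in the repository, and the ~29 MB file itself is fetched from LFS storage on checkout. The smaller validation lists below are stored as plain text.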
TalkingHead-1KH/data_list/val_video_ids.txt ADDED
@@ -0,0 +1,28 @@
1
+ 1lSejjfNHpw
2
+ 2Xu56MEC91w
3
+ 3y6Vjr45I34
4
+ 4hQi42Q9mcY
5
+ 5crEV5DbRyc
6
+ 85UEFVcmIjI
7
+ A2800grpOzU
8
+ c1DRo3tPDG4
9
+ d_7s4huYOD4
10
+ EGGsK7po68c
11
+ eKFlMKp9Gs0
12
+ EWKJprUrnPE
13
+ gp4fg9PWuhM
14
+ HBlkinewdHM
15
+ jpCrKYWjYD8
16
+ jxi_Cjc8T1w
17
+ kMXhWN71Ar0
18
+ m2ZmZflLryo
19
+ npEcenV-Y08
20
+ nUe3F8jYoZo
21
+ NXpWIephX1o
22
+ PAaWZTFRP9Q
23
+ SmtJ5Cy4jCM
24
+ SU8NSkuBkb0
25
+ VkKnOEQlwl4
26
+ WigprYZPaLc
27
+ YsrzvkG5_KI
28
+ Zel-zag38mQ
TalkingHead-1KH/data_list/val_video_tubes.txt ADDED
@@ -0,0 +1,38 @@
1
+ 1lSejjfNHpw_0075, 1080, 1920, 0, 728, 671, 47, 1471, 847
2
+ 1lSejjfNHpw_0075, 1080, 1920, 728, 1456, 671, 47, 1471, 847
3
+ 2Xu56MEC91w_0046, 1080, 1920, 80, 1105, 586, 86, 1314, 814
4
+ 3y6Vjr45I34_0004, 1080, 1920, 287, 1254, 568, 0, 1464, 896
5
+ 4hQi42Q9mcY_0002, 1080, 1920, 0, 605, 443, 0, 1515, 992
6
+ 4hQi42Q9mcY_0002, 1080, 1920, 605, 1209, 443, 0, 1515, 992
7
+ 5crEV5DbRyc_0009, 1080, 1920, 208, 1152, 1058, 102, 1712, 756
8
+ 85UEFVcmIjI_0014, 1080, 1920, 92, 627, 558, 134, 1294, 870
9
+ 85UEFVcmIjI_0014, 1080, 1920, 627, 1162, 558, 134, 1294, 870
10
+ A2800grpOzU_0002, 1080, 1920, 812, 1407, 227, 7, 1139, 919
11
+ EGGsK7po68c_0007, 1080, 1920, 0, 1024, 786, 50, 1598, 862
12
+ EWKJprUrnPE_0005, 1080, 1920, 0, 1024, 84, 168, 702, 786
13
+ HBlkinewdHM_0000, 1080, 1920, 319, 1344, 807, 149, 1347, 689
14
+ NXpWIephX1o_0031, 1080, 1920, 0, 632, 357, 0, 1493, 1072
15
+ NXpWIephX1o_0031, 1080, 1920, 632, 1264, 357, 0, 1493, 1072
16
+ PAaWZTFRP9Q_0001, 1080, 1920, 0, 672, 624, 42, 1376, 794
17
+ PAaWZTFRP9Q_0001, 1080, 1920, 926, 1425, 696, 101, 1464, 869
18
+ SU8NSkuBkb0_0015, 1080, 1920, 826, 1397, 347, 69, 1099, 821
19
+ SmtJ5Cy4jCM_0006, 1080, 1920, 0, 523, 524, 50, 1388, 914
20
+ SmtJ5Cy4jCM_0006, 1080, 1920, 546, 1134, 477, 42, 1357, 922
21
+ VkKnOEQlwl4_0010, 1080, 1920, 98, 818, 821, 22, 1733, 934
22
+ VkKnOEQlwl4_0010, 1080, 1920, 818, 1537, 821, 22, 1733, 934
23
+ WigprYZPaLc_0002, 1080, 1920, 234, 877, 802, 25, 1490, 713
24
+ WigprYZPaLc_0002, 1080, 1920, 877, 1519, 802, 25, 1490, 713
25
+ YsrzvkG5_KI_0018, 1080, 1920, 36, 1061, 591, 100, 1055, 564
26
+ Zel-zag38mQ_0001, 1080, 1920, 0, 733, 591, 12, 1439, 860
27
+ Zel-zag38mQ_0001, 1080, 1920, 733, 1466, 591, 12, 1439, 860
28
+ c1DRo3tPDG4_0010, 1080, 1920, 0, 865, 432, 33, 1264, 865
29
+ c1DRo3tPDG4_0010, 1080, 1920, 865, 1730, 432, 33, 1264, 865
30
+ eKFlMKp9Gs0_0005, 1080, 1920, 0, 1024, 705, 118, 1249, 662
31
+ gp4fg9PWuhM_0003, 1080, 1920, 0, 858, 526, 0, 1310, 768
32
+ jpCrKYWjYD8_0002, 1080, 1920, 0, 768, 527, 68, 1215, 756
33
+ jpCrKYWjYD8_0002, 1080, 1920, 768, 1535, 527, 68, 1215, 756
34
+ jxi_Cjc8T1w_0061, 1080, 1920, 0, 1024, 660, 102, 1286, 728
35
+ kMXhWN71Ar0_0001, 1080, 1920, 0, 656, 60, 0, 940, 832
36
+ kMXhWN71Ar0_0001, 1080, 1920, 656, 1311, 60, 0, 940, 832
37
+ m2ZmZflLryo_0009, 1080, 1920, 0, 1024, 678, 51, 1390, 763
38
+ npEcenV-Y08_0011, 1080, 1920, 99, 1087, 625, 69, 1425, 869
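Each tube line above encodes, in order: clip name, reference height and width of the source video, start and end frame, and the left/top/right/bottom bounds of the head crop; this matches the comma split performed in videos_crop.py below. A minimal Python parsing sketch, using the first validation entry:

    fields = "1lSejjfNHpw_0075, 1080, 1920, 0, 728, 671, 47, 1471, 847".split(",")
    clip_name = fields[0].strip()
    H, W, S, E, L, T, R, B = (int(x) for x in fields[1:])
    print(clip_name, "frames %d-%d" % (S, E), "crop (%d,%d)-(%d,%d)" % (L, T, R, B))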
TalkingHead-1KH/requirements.txt ADDED
@@ -0,0 +1,4 @@
1
+ ffmpeg-python
2
+ imageio
3
+ git+https://github.com/nficano/pytube
4
+ tqdm
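With these four entries in place, the dependencies can typically be installed in one step with pip install -r TalkingHead-1KH/requirements.txt; note that pytube is pulled directly from its GitHub repository rather than from PyPI.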
TalkingHead-1KH/teaser.gif ADDED

Git LFS Details

  • SHA256: b6408b915b161270fedbfc932daa9a6615b49dbc2d2691bb9d794fe91fbdb18d
  • Pointer size: 132 Bytes
  • Size of remote file: 3.93 MB
TalkingHead-1KH/videos_crop.py ADDED
@@ -0,0 +1,79 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # This script is licensed under the MIT License.
4
+
5
+ import argparse
6
+ import multiprocessing as mp
7
+ import os
8
+ from functools import partial
9
+ from time import time as timer
10
+
11
+ import ffmpeg
12
+ from tqdm import tqdm
13
+
14
+
15
+ parser = argparse.ArgumentParser()
16
+ parser.add_argument('--input_dir', type=str, required=True,
17
+ help='Dir containing youtube clips.')
18
+ parser.add_argument('--clip_info_file', type=str, required=True,
19
+ help='File containing clip information.')
20
+ parser.add_argument('--output_dir', type=str, required=True,
21
+ help='Location to dump outputs.')
22
+ parser.add_argument('--num_workers', type=int, default=8,
23
+ help='How many multiprocessing workers?')
24
+ args = parser.parse_args()
25
+
26
+
27
+ def get_h_w(filepath):
28
+ probe = ffmpeg.probe(filepath)
29
+ video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
30
+ height = int(video_stream['height'])
31
+ width = int(video_stream['width'])
32
+ return height, width
33
+
34
+
35
+ def trim_and_crop(input_dir, output_dir, clip_params):
36
+ video_name, H, W, S, E, L, T, R, B = clip_params.strip().split(',')
37
+ H, W, S, E, L, T, R, B = int(H), int(W), int(S), int(E), int(L), int(T), int(R), int(B)
38
+ output_filename = '{}_S{}_E{}_L{}_T{}_R{}_B{}.mp4'.format(video_name, S, E, L, T, R, B)
39
+ output_filepath = os.path.join(output_dir, output_filename)
40
+ if os.path.exists(output_filepath):
41
+ print('Output file %s exists, skipping' % (output_filepath))
42
+ return
43
+
44
+ input_filepath = os.path.join(input_dir, video_name + '.mp4')
45
+ if not os.path.exists(input_filepath):
46
+ print('Input file %s does not exist, skipping' % (input_filepath))
47
+ return
48
+
49
+ h, w = get_h_w(input_filepath)
50
+ t = int(T / H * h)
51
+ b = int(B / H * h)
52
+ l = int(L / W * w)
53
+ r = int(R / W * w)
54
+ stream = ffmpeg.input(input_filepath)
55
+ stream = ffmpeg.trim(stream, start_frame=S, end_frame=E+1)
56
+ stream = ffmpeg.crop(stream, l, t, r-l, b-t)
57
+ stream = ffmpeg.output(stream, output_filepath)
58
+ ffmpeg.run(stream)
59
+
60
+
61
+ if __name__ == '__main__':
62
+ # Read list of videos.
63
+ clip_info = []
64
+ with open(args.clip_info_file) as fin:
65
+ for line in fin:
66
+ clip_info.append(line.strip())
67
+
68
+ # Create output folder.
69
+ os.makedirs(args.output_dir, exist_ok=True)
70
+
71
+ # Trim and crop the videos.
72
+ downloader = partial(trim_and_crop, args.input_dir, args.output_dir)
73
+
74
+ start = timer()
75
+ pool_size = args.num_workers
76
+ print('Using pool size of %d' % (pool_size))
77
+ with mp.Pool(processes=pool_size) as p:
78
+ _ = list(tqdm(p.imap_unordered(downloader, clip_info), total=len(clip_info)))
79
+ print('Elapsed time: %.2f' % (timer() - start))
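Note that the L/T/R/B bounds in the tube file are expressed against the reference resolution (H, W) recorded in each line, while the downloaded stream may have a different resolution, so trim_and_crop rescales the box before cropping. A minimal sketch of that rescaling, assuming a hypothetical 720p download:

    H, W = 1080, 1920                 # reference resolution from the tube file
    h, w = 720, 1280                  # hypothetical resolution of the downloaded stream
    L, T, R, B = 671, 47, 1471, 847   # crop bounds from the first validation tube
    t, b = int(T / H * h), int(B / H * h)
    l, r = int(L / W * w), int(R / W * w)
    print(l, t, r - l, b - t)         # x, y, width, height as passed to ffmpeg.crop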
TalkingHead-1KH/videos_download.py ADDED
@@ -0,0 +1,56 @@
1
+ import argparse
2
+ import multiprocessing as mp
3
+ import os
4
+ from functools import partial
5
+ from time import time as timer
6
+
7
+ from pytube import YouTube
8
+ from tqdm import tqdm
9
+
10
+ parser = argparse.ArgumentParser()
11
+ parser.add_argument('--input_list', type=str, required=True,
12
+ help='List of youtube video ids')
13
+ parser.add_argument('--output_dir', type=str, default='data/youtube_videos',
14
+ help='Location to download videos')
15
+ parser.add_argument('--num_workers', type=int, default=8,
16
+ help='How many multiprocessing workers?')
17
+ args = parser.parse_args()
18
+
19
+
20
+ def download_video(output_dir, video_id):
21
+ r"""Download video."""
22
+ video_path = '%s/%s.mp4' % (output_dir, video_id)
23
+ if not os.path.isfile(video_path):
24
+ try:
25
+ # Download the highest quality mp4 stream.
26
+ yt = YouTube('https://www.youtube.com/watch?v=%s' % (video_id))
27
+ stream = yt.streams.filter(subtype='mp4', only_video=True, adaptive=True).first()
28
+ if stream is None:
29
+ stream = yt.streams.filter(subtype='mp4').first()
30
+ stream.download(output_path=output_dir, filename=video_id + '.mp4')
31
+ except Exception as e:
32
+ print(e)
33
+ print('Failed to download %s' % (video_id))
34
+ else:
35
+ print('File exists: %s' % (video_id))
36
+
37
+
38
+ if __name__ == '__main__':
39
+ # Read list of videos.
40
+ video_ids = []
41
+ with open(args.input_list) as fin:
42
+ for line in fin:
43
+ video_ids.append(line.strip())
44
+
45
+ # Create output folder.
46
+ os.makedirs(args.output_dir, exist_ok=True)
47
+
48
+ # Download videos.
49
+ downloader = partial(download_video, args.output_dir)
50
+
51
+ start = timer()
52
+ pool_size = args.num_workers
53
+ print('Using pool size of %d' % (pool_size))
54
+ with mp.Pool(processes=pool_size) as p:
55
+ _ = list(tqdm(p.imap_unordered(downloader, video_ids), total=len(video_ids)))
56
+ print('Elapsed time: %.2f' % (timer() - start))
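One subtlety of the stream selection above: the preferred filter (subtype='mp4', only_video=True, adaptive=True) picks a DASH video-only track, so the highest-quality downloads carry no audio, and only the progressive-mp4 fallback includes an audio track. That is fine for the visual cropping pipeline, but audio would have to be fetched separately if needed.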
TalkingHead-1KH/videos_download_and_crop.sh ADDED
@@ -0,0 +1,10 @@
1
+ dataset=$1
2
+
3
+ # Download the videos.
4
+ python videos_download.py --input_list data_list/${dataset}_video_ids.txt --output_dir ${dataset}/raw_videos
5
+
6
+ # Split the videos into 1-min chunks.
7
+ ./videos_split.sh ${dataset}/raw_videos ${dataset}/1min_clips
8
+
9
+ # Extract the talking head clips.
10
+ python videos_crop.py --input_dir ${dataset}/1min_clips/ --output_dir ${dataset}/cropped_clips --clip_info_file data_list/${dataset}_video_tubes.txt
TalkingHead-1KH/videos_split.sh ADDED
@@ -0,0 +1,11 @@
1
+ #!/bin/bash
2
+
3
+ in_dir=$1
4
+ out_dir=$2
5
+
6
+ mkdir $out_dir;
7
+ for f in $in_dir/*.mp4
8
+ do
9
+ y=${f##*/};
10
+ ffmpeg -i $f -c copy -map 0 -segment_time 00:01:00 -f segment $out_dir/${y/.mp4}_%04d.mp4;
11
+ done
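The ${y/.mp4}_%04d.mp4 pattern names each one-minute chunk <video_id>_<index>.mp4, which is exactly the clip-name prefix the tube lists refer to. A small Python sketch of the resulting name:

    video_id, chunk_idx = "1lSejjfNHpw", 75
    chunk_name = "%s_%04d.mp4" % (video_id, chunk_idx)
    assert chunk_name == "1lSejjfNHpw_0075.mp4"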
checkpoints/.gitkeep ADDED
File without changes
data_gen/eg3d/convert_to_eg3d_convention.py ADDED
@@ -0,0 +1,146 @@
1
+ import numpy as np
2
+ import torch
3
+ import copy
4
+ from utils.commons.tensor_utils import convert_to_tensor, convert_to_np
5
+ from deep_3drecon.deep_3drecon_models.bfm import ParametricFaceModel
6
+
7
+
8
+ def _fix_intrinsics(intrinsics):
9
+ """
10
+ intrinsics: [3,3], not batch-wise
11
+ """
12
+ # unnormalized normalized
13
+
14
+ # [[ f_x, s=0, x_0] [[ f_x/size_x, s=0, x_0/size_x=0.5]
15
+ # [ 0, f_y, y_0] -> [ 0, f_y/size_y, y_0/size_y=0.5]
16
+ # [ 0, 0, 1 ]] [ 0, 0, 1 ]]
17
+ intrinsics = np.array(intrinsics).copy()
18
+ assert intrinsics.shape == (3, 3), intrinsics
19
+ intrinsics[0,0] = 2985.29/700
20
+ intrinsics[1,1] = 2985.29/700
21
+ intrinsics[0,2] = 1/2
22
+ intrinsics[1,2] = 1/2
23
+ assert intrinsics[0,1] == 0
24
+ assert intrinsics[2,2] == 1
25
+ assert intrinsics[1,0] == 0
26
+ assert intrinsics[2,0] == 0
27
+ assert intrinsics[2,1] == 0
28
+ return intrinsics
29
+
30
+ # Used in original submission
31
+ def _fix_pose_orig(pose):
32
+ """
33
+ pose: [4,4], not batch-wise
34
+ """
35
+ pose = np.array(pose).copy()
36
+ location = pose[:3, 3]
37
+ radius = np.linalg.norm(location)
38
+ pose[:3, 3] = pose[:3, 3]/radius * 2.7
39
+ return pose
40
+
41
+
42
+ def get_eg3d_convention_camera_pose_intrinsic(item):
43
+ """
44
+ item: a dict during binarize
45
+
46
+ """
47
+ if item['euler'].ndim == 1:
48
+ angle = convert_to_tensor(copy.copy(item['euler']))
49
+ trans = copy.deepcopy(item['trans'])
50
+
51
+ # handle the difference in euler-axis conventions between eg3d and ours
52
+ # see data_gen/process_ffhq_for_eg3d/transplant_eg3d_ckpt_into_our_convention.ipynb
53
+ # angle += torch.tensor([0, 3.1415926535, 3.1415926535], device=angle.device)
54
+ R = ParametricFaceModel.compute_rotation(angle.unsqueeze(0))[0].cpu().numpy()
55
+ trans[2] += -10
56
+ c = -np.dot(R, trans)
57
+ pose = np.eye(4)
58
+ pose[:3,:3] = R
59
+ c *= 0.27 # normalize camera radius
60
+ c[1] += 0.006 # additional offset used in submission
61
+ c[2] += 0.161 # additional offset used in submission
62
+ pose[0,3] = c[0]
63
+ pose[1,3] = c[1]
64
+ pose[2,3] = c[2]
65
+
66
+ focal = 2985.29 # = 1015*1024/224*(300/466.285),
67
+ # todo: if the camera intrinsics in the 3DMM fitting stage are modified, update this accordingly
68
+ pp = 512#112
69
+ w = 1024#224
70
+ h = 1024#224
71
+
72
+ K = np.eye(3)
73
+ K[0][0] = focal
74
+ K[1][1] = focal
75
+ K[0][2] = w/2.0
76
+ K[1][2] = h/2.0
77
+ convention_K = _fix_intrinsics(K)
78
+
79
+ Rot = np.eye(3)
80
+ Rot[0, 0] = 1
81
+ Rot[1, 1] = -1
82
+ Rot[2, 2] = -1
83
+ pose[:3, :3] = np.dot(pose[:3, :3], Rot) # permute axes
84
+ convention_pose = _fix_pose_orig(pose)
85
+
86
+ item['c2w'] = pose
87
+ item['convention_c2w'] = convention_pose
88
+ item['intrinsics'] = convention_K
89
+ return item
90
+ else:
91
+ num_samples = len(item['euler'])
92
+ eulers_all = convert_to_tensor(copy.deepcopy(item['euler'])) # [B, 3]
93
+ trans_all = copy.deepcopy(item['trans']) # [B, 3]
94
+
95
+ # handle the difference in euler-axis conventions between eg3d and ours
96
+ # see data_gen/process_ffhq_for_eg3d/transplant_eg3d_ckpt_into_our_convention.ipynb
97
+ # eulers_all += torch.tensor([0, 3.1415926535, 3.1415926535], device=eulers_all.device).unsqueeze(0).repeat([eulers_all.shape[0],1])
98
+
99
+ intrinsics = []
100
+ poses = []
101
+ convention_poses = []
102
+ for i in range(num_samples):
103
+ angle = eulers_all[i]
104
+ trans = trans_all[i]
105
+ R = ParametricFaceModel.compute_rotation(angle.unsqueeze(0))[0].cpu().numpy()
106
+ trans[2] += -10
107
+ c = -np.dot(R, trans)
108
+ pose = np.eye(4)
109
+ pose[:3,:3] = R
110
+ c *= 0.27 # normalize camera radius
111
+ c[1] += 0.006 # additional offset used in submission
112
+ c[2] += 0.161 # additional offset used in submission
113
+ pose[0,3] = c[0]
114
+ pose[1,3] = c[1]
115
+ pose[2,3] = c[2]
116
+
117
+ focal = 2985.29 # = 1015*1024/224*(300/466.285),
118
+ # todo: if the camera intrinsics in the 3DMM fitting stage are modified, update this accordingly
119
+ pp = 512#112
120
+ w = 1024#224
121
+ h = 1024#224
122
+
123
+ K = np.eye(3)
124
+ K[0][0] = focal
125
+ K[1][1] = focal
126
+ K[0][2] = w/2.0
127
+ K[1][2] = h/2.0
128
+ convention_K = _fix_intrinsics(K)
129
+ intrinsics.append(convention_K)
130
+
131
+ Rot = np.eye(3)
132
+ Rot[0, 0] = 1
133
+ Rot[1, 1] = -1
134
+ Rot[2, 2] = -1
135
+ pose[:3, :3] = np.dot(pose[:3, :3], Rot)
136
+ convention_pose = _fix_pose_orig(pose)
137
+ convention_poses.append(convention_pose)
138
+ poses.append(pose)
139
+
140
+ intrinsics = np.stack(intrinsics) # [B, 3, 3]
141
+ poses = np.stack(poses) # [B, 4, 4]
142
+ convention_poses = np.stack(convention_poses) # [B, 4, 4]
143
+ item['intrinsics'] = intrinsics
144
+ item['c2w'] = poses
145
+ item['convention_c2w'] = convention_poses
146
+ return item
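The key step applied by _fix_pose_orig is simple to state in isolation: the camera location (the translation column of the 4x4 cam2world pose) is rescaled onto a sphere of radius 2.7 around the origin, the camera radius assumed by the EG3D convention here. A minimal numpy sketch with an illustrative pose:

    import numpy as np

    pose = np.eye(4)
    pose[:3, 3] = [0.1, 0.2, 9.5]        # illustrative camera location
    radius = np.linalg.norm(pose[:3, 3])
    pose[:3, 3] = pose[:3, 3] / radius * 2.7
    print(np.linalg.norm(pose[:3, 3]))   # -> 2.7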
data_gen/runs/binarizer_nerf.py ADDED
@@ -0,0 +1,335 @@
1
+ import os
2
+ import numpy as np
3
+ import math
4
+ import json
5
+ import imageio
6
+ import torch
7
+ import tqdm
8
+ import cv2
9
+
10
+ from data_util.face3d_helper import Face3DHelper
11
+ from utils.commons.euler2rot import euler_trans_2_c2w, c2w_to_euler_trans
12
+ from data_gen.utils.process_video.euler2quaterion import euler2quaterion, quaterion2euler
13
+ from deep_3drecon.deep_3drecon_models.bfm import ParametricFaceModel
14
+
15
+
16
+ def euler2rot(euler_angle):
17
+ batch_size = euler_angle.shape[0]
18
+ theta = euler_angle[:, 0].reshape(-1, 1, 1)
19
+ phi = euler_angle[:, 1].reshape(-1, 1, 1)
20
+ psi = euler_angle[:, 2].reshape(-1, 1, 1)
21
+ one = torch.ones(batch_size, 1, 1).to(euler_angle.device)
22
+ zero = torch.zeros(batch_size, 1, 1).to(euler_angle.device)
23
+ rot_x = torch.cat((
24
+ torch.cat((one, zero, zero), 1),
25
+ torch.cat((zero, theta.cos(), theta.sin()), 1),
26
+ torch.cat((zero, -theta.sin(), theta.cos()), 1),
27
+ ), 2)
28
+ rot_y = torch.cat((
29
+ torch.cat((phi.cos(), zero, -phi.sin()), 1),
30
+ torch.cat((zero, one, zero), 1),
31
+ torch.cat((phi.sin(), zero, phi.cos()), 1),
32
+ ), 2)
33
+ rot_z = torch.cat((
34
+ torch.cat((psi.cos(), -psi.sin(), zero), 1),
35
+ torch.cat((psi.sin(), psi.cos(), zero), 1),
36
+ torch.cat((zero, zero, one), 1)
37
+ ), 2)
38
+ return torch.bmm(rot_x, torch.bmm(rot_y, rot_z))
39
+
40
+
41
+ def rot2euler(rot_mat):
42
+ batch_size = len(rot_mat)
43
+ # we assume that y is in [-0.5*pi, 0.5*pi]
44
+ cos_y = torch.sqrt(rot_mat[:, 1, 2] * rot_mat[:, 1, 2] + rot_mat[:, 2, 2] * rot_mat[:, 2, 2])
45
+ theta_x = torch.atan2(-rot_mat[:, 1, 2], rot_mat[:, 2, 2])
46
+ theta_y = torch.atan2(rot_mat[:, 2, 0], cos_y)
47
+ theta_z = torch.atan2(rot_mat[:, 0, 1], rot_mat[:, 0, 0])
48
+ euler_angles = torch.zeros([batch_size, 3])
49
+ euler_angles[:, 0] = theta_x
50
+ euler_angles[:, 1] = theta_y
51
+ euler_angles[:, 2] = theta_z
52
+ return euler_angles
53
+
54
+ index_lm68_from_lm468 = [127,234,93,132,58,136,150,176,152,400,379,365,288,361,323,454,356,70,63,105,66,107,336,296,334,293,300,168,197,5,4,75,97,2,326,305,
55
+ 33,160,158,133,153,144,362,385,387,263,373,380,61,40,37,0,267,270,291,321,314,17,84,91,78,81,13,311,308,402,14,178]
56
+
57
+ def plot_lm2d(lm2d):
58
+ WH = 512
59
+ img = np.ones([WH, WH, 3], dtype=np.uint8) * 255
60
+
61
+ for i in range(len(lm2d)):
62
+ x, y = lm2d[i]
63
+ color = (255,0,0)
64
+ img = cv2.circle(img, center=(int(x),int(y)), radius=3, color=color, thickness=-1)
65
+ font = cv2.FONT_HERSHEY_SIMPLEX
66
+ for i in range(len(lm2d)):
67
+ x, y = lm2d[i]
68
+ img = cv2.putText(img, f"{i}", org=(int(x),int(y)), fontFace=font, fontScale=0.3, color=(255,0,0))
69
+ return img
70
+
71
+ def get_face_rect(lms, h, w):
72
+ """
73
+ lms: [68, 2]
74
+ h, w: int
75
+ return: [4,]
76
+ """
77
+ assert len(lms) == 68
78
+ # min_x, max_x = np.min(lms, 0)[0], np.max(lms, 0)[0]
79
+ min_x, max_x = np.min(lms[:, 0]), np.max(lms[:, 0])
80
+ cx = int((min_x+max_x)/2.0)
81
+ cy = int(lms[27, 1])
82
+ h_w = int((max_x-cx)*1.5)
83
+ h_h = int((lms[8, 1]-cy)*1.15)
84
+ rect_x = cx - h_w
85
+ rect_y = cy - h_h
86
+ if rect_x < 0:
87
+ rect_x = 0
88
+ if rect_y < 0:
89
+ rect_y = 0
90
+ rect_w = min(w-1-rect_x, 2*h_w)
91
+ rect_h = min(h-1-rect_y, 2*h_h)
92
+ # rect = np.array((rect_x, rect_y, rect_w, rect_h), dtype=np.int32)
93
+ # rect = [rect_x, rect_y, rect_w, rect_h]
94
+ rect = [rect_x, rect_x + rect_w, rect_y, rect_y + rect_h] # min_j, max_j, min_i, max_i
95
+ return rect # this x is width, y is height
96
+
97
+ def get_lip_rect(lms, h, w):
98
+ """
99
+ lms: [68, 2]
100
+ h, w: int
101
+ return: [4,]
102
+ """
103
+ # this x is width, y is height
104
+ # for lms, lms[:, 0] is width, lms[:, 1] is height
105
+ assert len(lms) == 68
106
+ lips = slice(48, 60)
107
+ lms = lms[lips]
108
+ min_x, max_x = np.min(lms[:, 0]), np.max(lms[:, 0])
109
+ min_y, max_y = np.min(lms[:, 1]), np.max(lms[:, 1])
110
+ cx = int((min_x+max_x)/2.0)
111
+ cy = int((min_y+max_y)/2.0)
112
+ h_w = int((max_x-cx)*1.2)
113
+ h_h = int((max_y-cy)*1.2)
114
+
115
+ h_w = max(h_w, h_h)
116
+ h_h = h_w
117
+
118
+ rect_x = cx - h_w
119
+ rect_y = cy - h_h
120
+ rect_w = 2*h_w
121
+ rect_h = 2*h_h
122
+ if rect_x < 0:
123
+ rect_x = 0
124
+ if rect_y < 0:
125
+ rect_y = 0
126
+
127
+ if rect_x + rect_w > w:
128
+ rect_x = w - rect_w
129
+ if rect_y + rect_h > h:
130
+ rect_y = h - rect_h
131
+
132
+ rect = [rect_x, rect_x + rect_w, rect_y, rect_y + rect_h] # min_j, max_j, min_i, max_i
133
+ return rect # this x is width, y is height
134
+
135
+
136
+ # def get_lip_rect(lms, h, w):
137
+ # """
138
+ # lms: [68, 2]
139
+ # h, w: int
140
+ # return: [4,]
141
+ # """
142
+ # assert len(lms) == 68
143
+ # lips = slice(48, 60)
144
+ # # this x is width, y is height
145
+ # xmin, xmax = int(lms[lips, 1].min()), int(lms[lips, 1].max())
146
+ # ymin, ymax = int(lms[lips, 0].min()), int(lms[lips, 0].max())
147
+ # # padding to H == W
148
+ # cx = (xmin + xmax) // 2
149
+ # cy = (ymin + ymax) // 2
150
+ # l = max(xmax - xmin, ymax - ymin) // 2
151
+ # xmin = max(0, cx - l)
152
+ # xmax = min(h, cx + l)
153
+ # ymin = max(0, cy - l)
154
+ # ymax = min(w, cy + l)
155
+ # lip_rect = [xmin, xmax, ymin, ymax]
156
+ # return lip_rect
157
+
158
+ def get_win_conds(conds, idx, smo_win_size=8, pad_option='zero'):
159
+ """
160
+ conds: [b, t=16, h=29]
161
+ idx: long, time index of the selected frame
162
+ """
163
+ idx = max(0, idx)
164
+ idx = min(idx, conds.shape[0]-1)
165
+ smo_half_win_size = smo_win_size//2
166
+ left_i = idx - smo_half_win_size
167
+ right_i = idx + (smo_win_size - smo_half_win_size)
168
+ pad_left, pad_right = 0, 0
169
+ if left_i < 0:
170
+ pad_left = -left_i
171
+ left_i = 0
172
+ if right_i > conds.shape[0]:
173
+ pad_right = right_i - conds.shape[0]
174
+ right_i = conds.shape[0]
175
+ conds_win = conds[left_i:right_i]
176
+ if pad_left > 0:
177
+ if pad_option == 'zero':
178
+ conds_win = np.concatenate([np.zeros_like(conds_win)[:pad_left], conds_win], axis=0)
179
+ elif pad_option == 'edge':
180
+ edge_value = conds[0][np.newaxis, ...]
181
+ conds_win = np.concatenate([edge_value] * pad_left + [conds_win], axis=0)
182
+ else:
183
+ raise NotImplementedError
184
+ if pad_right > 0:
185
+ if pad_option == 'zero':
186
+ conds_win = np.concatenate([conds_win, np.zeros_like(conds_win)[:pad_right]], axis=0)
187
+ elif pad_option == 'edge':
188
+ edge_value = conds[-1][np.newaxis, ...]
189
+ conds_win = np.concatenate([conds_win] + [edge_value] * pad_right , axis=0)
190
+ else:
191
+ raise NotImplementedError
192
+ assert conds_win.shape[0] == smo_win_size
193
+ return conds_win
194
+
195
+
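    # A minimal standalone sketch of the windowing in get_win_conds above,
    # with toy numpy features: a fixed-size window around frame idx,
    # zero-padded where it overruns the sequence boundaries.
    import numpy as np
    conds = np.arange(10, dtype=np.float32)[:, None]    # [T=10, h=1] toy features
    idx, win = 1, 8
    left, right = idx - win // 2, idx + (win - win // 2)
    pad_l, pad_r = max(0, -left), max(0, right - len(conds))
    window = conds[max(0, left):min(len(conds), right)]
    window = np.concatenate([np.zeros((pad_l, 1), np.float32), window,
                             np.zeros((pad_r, 1), np.float32)])
    assert window.shape == (win, 1)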
196
+ def load_processed_data(processed_dir):
197
+ # load necessary files
198
+ background_img_name = os.path.join(processed_dir, "bg.jpg")
199
+ assert os.path.exists(background_img_name)
200
+ head_img_dir = os.path.join(processed_dir, "head_imgs")
201
+ torso_img_dir = os.path.join(processed_dir, "inpaint_torso_imgs")
202
+ gt_img_dir = os.path.join(processed_dir, "gt_imgs")
203
+
204
+ hubert_npy_name = os.path.join(processed_dir, "aud_hubert.npy")
205
+ mel_f0_npy_name = os.path.join(processed_dir, "aud_mel_f0.npy")
206
+ coeff_npy_name = os.path.join(processed_dir, "coeff_fit_mp.npy")
207
+ lm2d_npy_name = os.path.join(processed_dir, "lms_2d.npy")
208
+
209
+ ret_dict = {}
210
+
211
+ ret_dict['bg_img'] = imageio.imread(background_img_name)
212
+ ret_dict['H'], ret_dict['W'] = ret_dict['bg_img'].shape[:2]
213
+ ret_dict['focal'], ret_dict['cx'], ret_dict['cy'] = face_model.focal, face_model.center, face_model.center
214
+
215
+ print("loading lm2d coeff ...")
216
+ lm2d_arr = np.load(lm2d_npy_name)
217
+ face_rect_lst = []
218
+ lip_rect_lst = []
219
+ for lm2d in lm2d_arr:
220
+ if len(lm2d) in [468, 478]:
221
+ lm2d = lm2d[index_lm68_from_lm468]
222
+ face_rect = get_face_rect(lm2d, ret_dict['H'], ret_dict['W'])
223
+ lip_rect = get_lip_rect(lm2d, ret_dict['H'], ret_dict['W'])
224
+ face_rect_lst.append(face_rect)
225
+ lip_rect_lst.append(lip_rect)
226
+ face_rects = np.stack(face_rect_lst, axis=0) # [T, 4]
227
+
228
+ print("loading fitted 3dmm coeff ...")
229
+ coeff_dict = np.load(coeff_npy_name, allow_pickle=True).tolist()
230
+ identity_arr = coeff_dict['id']
231
+ exp_arr = coeff_dict['exp']
232
+ ret_dict['id'] = identity_arr
233
+ ret_dict['exp'] = exp_arr
234
+ euler_arr = ret_dict['euler'] = coeff_dict['euler']
235
+ trans_arr = ret_dict['trans'] = coeff_dict['trans']
236
+ print("calculating lm3d ...")
237
+ idexp_lm3d_arr = face3d_helper.reconstruct_idexp_lm3d(torch.from_numpy(identity_arr), torch.from_numpy(exp_arr)).cpu().numpy().reshape([-1, 68*3])
238
+ len_motion = len(idexp_lm3d_arr)
239
+ video_idexp_lm3d_mean = idexp_lm3d_arr.mean(axis=0)
240
+ video_idexp_lm3d_std = idexp_lm3d_arr.std(axis=0)
241
+ ret_dict['idexp_lm3d'] = idexp_lm3d_arr
242
+ ret_dict['idexp_lm3d_mean'] = video_idexp_lm3d_mean
243
+ ret_dict['idexp_lm3d_std'] = video_idexp_lm3d_std
244
+
245
+ # now we convert the euler_trans from deep3d convention to adnerf convention
246
+ eulers = torch.FloatTensor(euler_arr)
247
+ trans = torch.FloatTensor(trans_arr)
248
+ rots = face_model.compute_rotation(eulers) # rotation matrices are a better intermediate than Euler angles when transplanting between conventions
249
+
250
+ # handle the camera pose to geneface's convention
251
+ trans[:, 2] = 10 - trans[:, 2] # undo the to_camera op applied during fitting, i.e., trans[...,2] = 10 - trans[...,2]
252
+ rots = rots.permute(0, 2, 1)
253
+ trans[:, 2] = - trans[:,2] # because the intrinsic projection differs
254
+ # below is the NeRF camera preprocessing strategy, see `save_transforms` in data_util/process.py
255
+ trans = trans / 10.0
256
+ rots_inv = rots.permute(0, 2, 1)
257
+ trans_inv = - torch.bmm(rots_inv, trans.unsqueeze(2))
258
+
259
+ pose = torch.eye(4, dtype=torch.float32).unsqueeze(0).repeat([len_motion, 1, 1]) # [T, 4, 4]
260
+ pose[:, :3, :3] = rots_inv
261
+ pose[:, :3, 3] = trans_inv[:, :, 0]
262
+ c2w_transform_matrices = pose.numpy()
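+ # Note: a rigid world-to-camera transform (R, t) inverts to camera-to-world as
+ # (R^T, -R^T t); rots_inv and trans_inv above follow exactly this identity.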
263
+
264
+ # process the audio features used for postnet training
265
+ print("loading hubert ...")
266
+ hubert_features = np.load(hubert_npy_name)
267
+ print("loading Mel and F0 ...")
268
+ mel_f0_features = np.load(mel_f0_npy_name, allow_pickle=True).tolist()
269
+
270
+ ret_dict['hubert'] = hubert_features
271
+ ret_dict['mel'] = mel_f0_features['mel']
272
+ ret_dict['f0'] = mel_f0_features['f0']
273
+
274
+ # obtaining train samples
275
+ frame_indices = list(range(len_motion))
276
+ num_train = len_motion // 11 * 10
277
+ train_indices = frame_indices[:num_train]
278
+ val_indices = frame_indices[num_train:]
279
+
280
+ for split in ['train', 'val']:
281
+ if split == 'train':
282
+ indices = train_indices
283
+ samples = []
284
+ ret_dict['train_samples'] = samples
285
+ elif split == 'val':
286
+ indices = val_indices
287
+ samples = []
288
+ ret_dict['val_samples'] = samples
289
+
290
+ for idx in indices:
291
+ sample = {}
292
+ sample['idx'] = idx
293
+ sample['head_img_fname'] = os.path.join(head_img_dir,f"{idx:08d}.png")
294
+ sample['torso_img_fname'] = os.path.join(torso_img_dir,f"{idx:08d}.png")
295
+ sample['gt_img_fname'] = os.path.join(gt_img_dir,f"{idx:08d}.jpg")
296
+ # assert os.path.exists(sample['head_img_fname']) and os.path.exists(sample['torso_img_fname']) and os.path.exists(sample['gt_img_fname'])
297
+ sample['face_rect'] = face_rects[idx]
298
+ sample['lip_rect'] = lip_rect_lst[idx]
299
+ sample['c2w'] = c2w_transform_matrices[idx]
300
+ samples.append(sample)
301
+ return ret_dict
302
+
303
+
304
+ class Binarizer:
305
+ def __init__(self):
306
+ self.data_dir = 'data/'
307
+
308
+ def parse(self, video_id):
309
+ processed_dir = os.path.join(self.data_dir, 'processed/videos', video_id)
310
+ binary_dir = os.path.join(self.data_dir, 'binary/videos', video_id)
311
+ out_fname = os.path.join(binary_dir, "trainval_dataset.npy")
312
+ os.makedirs(binary_dir, exist_ok=True)
313
+ ret = load_processed_data(processed_dir)
314
+ mel_name = os.path.join(processed_dir, 'aud_mel_f0.npy')
315
+ mel_f0_dict = np.load(mel_name, allow_pickle=True).tolist()
316
+ ret.update(mel_f0_dict)
317
+ np.save(out_fname, ret, allow_pickle=True)
318
+
319
+
320
+
321
+ if __name__ == '__main__':
322
+ from argparse import ArgumentParser
323
+ parser = ArgumentParser()
324
+ parser.add_argument('--video_id', type=str, default='May', help='')
325
+ args = parser.parse_args()
326
+ ### Process Single Long Audio for NeRF dataset
327
+ video_id = args.video_id
328
+ face_model = ParametricFaceModel(bfm_folder='deep_3drecon/BFM',
329
+ camera_distance=10, focal=1015)
330
+ face_model.to("cpu")
331
+ face3d_helper = Face3DHelper()
332
+
333
+ binarizer = Binarizer()
334
+ binarizer.parse(video_id)
335
+ print(f"Binarization for {video_id} Done!")
data_gen/runs/binarizer_th1kh.py ADDED
@@ -0,0 +1,100 @@
1
+ import os
2
+ import numpy as np
3
+
4
+ import torch
5
+ from tqdm import trange
6
+ import pickle
7
+ from copy import deepcopy
8
+
9
+ from data_util.face3d_helper import Face3DHelper
10
+ from utils.commons.indexed_datasets import IndexedDataset, IndexedDatasetBuilder
11
+
12
+
13
+ def load_video_npy(fn):
14
+ assert fn.endswith("_coeff_fit_mp.npy")
15
+ ret_dict = np.load(fn,allow_pickle=True).item()
16
+ video_dict = {
17
+ 'euler': ret_dict['euler'], # [T, 3]
18
+ 'trans': ret_dict['trans'], # [T, 3]
19
+ 'id': ret_dict['id'], # [T, 80]
20
+ 'exp': ret_dict['exp'], # [T, 64]
21
+ }
22
+ return video_dict
23
+
24
+ def cal_lm3d_in_video_dict(video_dict, face3d_helper):
25
+ identity = video_dict['id']
26
+ exp = video_dict['exp']
27
+ idexp_lm3d = face3d_helper.reconstruct_idexp_lm3d(identity, exp).cpu().numpy()
28
+ video_dict['idexp_lm3d'] = idexp_lm3d
29
+
30
+
31
+ def load_audio_npy(fn):
32
+ assert fn.endswith(".npy")
33
+ ret_dict = np.load(fn,allow_pickle=True).item()
34
+ audio_dict = {
35
+ "mel": ret_dict['mel'], # [T, 80]
36
+ "f0": ret_dict['f0'], # [T,1]
37
+ }
38
+ return audio_dict
39
+
40
+
41
+ if __name__ == '__main__':
42
+ face3d_helper = Face3DHelper(use_gpu=False)
43
+
44
+ import glob,tqdm
45
+ prefixs = ['val', 'train']
46
+ binarized_ds_path = "data/binary/th1kh"
47
+ os.makedirs(binarized_ds_path, exist_ok=True)
48
+ for prefix in prefixs:
49
+ databuilder = IndexedDatasetBuilder(os.path.join(binarized_ds_path, prefix), gzip=False, default_idx_size=1024*1024*1024*2)
50
+ raw_base_dir = '/mnt/bn/ailabrenyi/entries/yezhenhui/datasets/raw/TH1KH_512/video'
51
+ mp4_names = glob.glob(os.path.join(raw_base_dir, '*.mp4'))
52
+ mp4_names = mp4_names[:1000]
53
+ cnt = 0
54
+ scnt = 0
55
+ pbar = tqdm.tqdm(enumerate(mp4_names), total=len(mp4_names))
56
+ for i, mp4_name in pbar:
57
+ cnt += 1
58
+ if prefix == 'train':
59
+ if i % 100 == 0:
60
+ continue
61
+ else:
62
+ if i % 100 != 0:
63
+ continue
64
+ hubert_npy_name = mp4_name.replace("/video/", "/hubert/").replace(".mp4", "_hubert.npy")
65
+ audio_npy_name = mp4_name.replace("/video/", "/mel_f0/").replace(".mp4", "_mel_f0.npy")
66
+ video_npy_name = mp4_name.replace("/video/", "/coeff_fit_mp/").replace(".mp4", "_coeff_fit_mp.npy")
67
+ if not os.path.exists(audio_npy_name):
68
+ print(f"Skip item for audio npy not found.")
69
+ continue
70
+ if not os.path.exists(video_npy_name):
71
+ print(f"Skip item for video npy not found.")
72
+ continue
73
+ if not os.path.exists(hubert_npy_name):
75
+ print(f"Skip {mp4_name}: hubert npy not found.")
75
+ continue
76
+ audio_dict = load_audio_npy(audio_npy_name)
77
+ hubert = np.load(hubert_npy_name)
78
+ video_dict = load_video_npy(video_npy_name)
79
+ com_img_dir = mp4_name.replace("/video/", "/com_imgs/").replace(".mp4", "")
80
+ num_com_imgs = len(glob.glob(os.path.join(com_img_dir, '*')))
81
+ num_frames = len(video_dict['exp'])
82
+ if num_com_imgs != num_frames:
83
+ print(f"Skip item for length mismatch.")
84
+ continue
85
+ mel = audio_dict['mel']
86
+ if mel.shape[0] < 32: # the video is shorter than 0.6s
87
+ print(f"Skip item for too short.")
88
+ continue
89
+
90
+ audio_dict.update(video_dict)
91
+ audio_dict['item_id'] = os.path.basename(mp4_name)[:-4]
92
+ audio_dict['hubert'] = hubert # [T_x, hid=1024]
93
+ audio_dict['img_dir'] = com_img_dir
94
+
95
+
96
+ databuilder.add_item(audio_dict)
97
+ scnt += 1
98
+ pbar.set_postfix({'success': scnt, 'success rate': scnt / cnt})
99
+ databuilder.finalize()
100
+ print(f"{prefix} set has {cnt} samples!")
data_gen/runs/nerf/process_guide.md ADDED
@@ -0,0 +1,49 @@
1
+ # Tip: the first time through, run the commands below one by one to make sure your environment works; after that you can simply run run.sh in this directory to finish all of the steps in one go.
2
+
3
+ # Step 0. Crop the video to 512x512 at 25 FPS, and make sure the target face is visible in every frame (set VIDEO_ID first, e.g. `export VIDEO_ID=May`)
4
+ ```
5
+ ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -vf fps=25,scale=w=512:h=512 -qmin 1 -q:v 1 data/raw/videos/${VIDEO_ID}_512.mp4
6
+ mv data/raw/videos/${VIDEO_ID}.mp4 data/raw/videos/${VIDEO_ID}_to_rm.mp4
7
+ mv data/raw/videos/${VIDEO_ID}_512.mp4 data/raw/videos/${VIDEO_ID}.mp4
8
+ ```
9
+ # Step 1. Extract audio features, e.g. mel, f0, hubert, esperanto
10
+ ```
11
+ export CUDA_VISIBLE_DEVICES=0
12
+ export VIDEO_ID=May
13
+ mkdir -p data/processed/videos/${VIDEO_ID}
14
+ ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -f wav -ar 16000 data/processed/videos/${VIDEO_ID}/aud.wav
15
+ python data_gen/utils/process_audio/extract_hubert.py --video_id=${VIDEO_ID}
16
+ python data_gen/utils/process_audio/extract_mel_f0.py --video_id=${VIDEO_ID}
17
+ ```
18
+
19
+ # Step 2. Extract images
20
+ ```
21
+ export VIDEO_ID=May
22
+ export CUDA_VISIBLE_DEVICES=0
23
+ mkdir -p data/processed/videos/${VIDEO_ID}/gt_imgs
24
+ ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -vf fps=25,scale=w=512:h=512 -qmin 1 -q:v 1 -start_number 0 data/processed/videos/${VIDEO_ID}/gt_imgs/%08d.jpg
25
+ python data_gen/utils/process_video/extract_segment_imgs.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4 # extract image, segmap, and background
26
+ ```
27
+
28
+ # Step 3. Extract lm2d_mediapipe
29
+ ### Extract 2D landmarks for the subsequent 3DMM fitting
30
+ ### num_workers is the number of CPU workers on this machine; total_process is the number of machines used; process_id is the index of this machine
31
+
32
+ ```
33
+ export VIDEO_ID=May
34
+ python data_gen/utils/process_video/extract_lm2d.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4
35
+ ```
36
+
37
+ # Step 4. Fit the 3DMM
38
+ ```
39
+ export VIDEO_ID=May
40
+ export CUDA_VISIBLE_DEVICES=0
41
+ python data_gen/utils/process_video/fit_3dmm_landmark.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4 --reset --debug --id_mode=global
42
+ ```
43
+
44
+ # Step 5. Binarize
45
+ ```
46
+ export VIDEO_ID=May
47
+ python data_gen/runs/binarizer_nerf.py --video_id=${VIDEO_ID}
48
+ ```
49
+ You should now find the dataset under `data/binary/videos/May` (i.e. `data/binary/videos/${VIDEO_ID}`).
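+
+ A quick sanity check on the binarized output (a minimal sketch; the keys follow `binarizer_nerf.py`):
+ ```
+ python -c "import numpy as np; d = np.load('data/binary/videos/May/trainval_dataset.npy', allow_pickle=True).tolist(); print(d.keys())"
+ ```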
data_gen/runs/nerf/run.sh ADDED
@@ -0,0 +1,51 @@
1
+ # usage: CUDA_VISIBLE_DEVICES=0 bash data_gen/runs/nerf/run.sh <VIDEO_ID>
2
+ # please place video to data/raw/videos/${VIDEO_ID}.mp4
3
+ VIDEO_ID=$1
4
+ echo Processing $VIDEO_ID
5
+
6
+ echo Resizing the video to 512x512
7
+ ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -vf fps=25,scale=w=512:h=512 -qmin 1 -q:v 1 -y data/raw/videos/${VIDEO_ID}_512.mp4
8
+ mv data/raw/videos/${VIDEO_ID}.mp4 data/raw/videos/${VIDEO_ID}_to_rm.mp4
9
+ mv data/raw/videos/${VIDEO_ID}_512.mp4 data/raw/videos/${VIDEO_ID}.mp4
10
+ echo Done
11
+ echo The original video has been moved from data/raw/videos/${VIDEO_ID}.mp4 to data/raw/videos/${VIDEO_ID}_to_rm.mp4
12
+
13
+ echo mkdir -p data/processed/videos/${VIDEO_ID}
14
+ mkdir -p data/processed/videos/${VIDEO_ID}
15
+ echo Done
16
+
17
+ # extract audio file from the training video
18
+ echo ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -f wav -ar 16000 -v quiet -y data/processed/videos/${VIDEO_ID}/aud.wav
19
+ ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -f wav -ar 16000 -v quiet -y data/processed/videos/${VIDEO_ID}/aud.wav
20
+ echo Done
21
+
22
+ # extract hubert_mel_f0 from audio
23
+ echo python data_gen/utils/process_audio/extract_hubert.py --video_id=${VIDEO_ID}
24
+ python data_gen/utils/process_audio/extract_hubert.py --video_id=${VIDEO_ID}
25
+ echo python data_gen/utils/process_audio/extract_mel_f0.py --video_id=${VIDEO_ID}
26
+ python data_gen/utils/process_audio/extract_mel_f0.py --video_id=${VIDEO_ID}
27
+ echo Done
28
+
29
+ # extract segment images
30
+ echo mkdir -p data/processed/videos/${VIDEO_ID}/gt_imgs
31
+ mkdir -p data/processed/videos/${VIDEO_ID}/gt_imgs
32
+ echo ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -vf fps=25,scale=w=512:h=512 -qmin 1 -q:v 1 -start_number 0 -v quiet data/processed/videos/${VIDEO_ID}/gt_imgs/%08d.jpg
33
+ ffmpeg -i data/raw/videos/${VIDEO_ID}.mp4 -vf fps=25,scale=w=512:h=512 -qmin 1 -q:v 1 -start_number 0 -v quiet data/processed/videos/${VIDEO_ID}/gt_imgs/%08d.jpg
34
+ echo Done
35
+
36
+ echo python data_gen/utils/process_video/extract_segment_imgs.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4 # extract image, segmap, and background
37
+ python data_gen/utils/process_video/extract_segment_imgs.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4 # extract image, segmap, and background
38
+ echo Done
39
+
40
+ echo python data_gen/utils/process_video/extract_lm2d.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4
41
+ python data_gen/utils/process_video/extract_lm2d.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4
42
+ echo Done
43
+
44
+ pkill -f "void*" # quote the pattern so the shell does not glob it
45
+ echo python data_gen/utils/process_video/fit_3dmm_landmark.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4 --reset --debug --id_mode=global
46
+ python data_gen/utils/process_video/fit_3dmm_landmark.py --ds_name=nerf --vid_dir=data/raw/videos/${VIDEO_ID}.mp4 --reset --debug --id_mode=global
47
+ echo Done
48
+
49
+ echo python data_gen/runs/binarizer_nerf.py --video_id=${VIDEO_ID}
50
+ python data_gen/runs/binarizer_nerf.py --video_id=${VIDEO_ID}
51
+ echo Done
data_gen/utils/mp_feature_extractors/face_landmarker.py ADDED
@@ -0,0 +1,130 @@
1
+ import mediapipe as mp
2
+ from mediapipe.tasks import python
3
+ from mediapipe.tasks.python import vision
4
+ import numpy as np
5
+ import cv2
6
+ import os
7
+ import copy
8
+
9
+ # simplified mediapipe ldm at https://github.com/k-m-irfan/simplified_mediapipe_face_landmarks
10
+ index_lm141_from_lm478 = [70,63,105,66,107,55,65,52,53,46] + [300,293,334,296,336,285,295,282,283,276] + [33,246,161,160,159,158,157,173,133,155,154,153,145,144,163,7] + [263,466,388,387,386,385,384,398,362,382,381,380,374,373,390,249] + [78,191,80,81,82,13,312,311,310,415,308,324,318,402,317,14,87,178,88,95] + [61,185,40,39,37,0,267,269,270,409,291,375,321,405,314,17,84,181,91,146] + [10,338,297,332,284,251,389,356,454,323,361,288,397,365,379,378,400,377,152,148,176,149,150,136,172,58,132,93,234,127,162,21,54,103,67,109] + [468,469,470,471,472] + [473,474,475,476,477] + [64,4,294]
11
+ # lm141 without iris
12
+ index_lm131_from_lm478 = [70,63,105,66,107,55,65,52,53,46] + [300,293,334,296,336,285,295,282,283,276] + [33,246,161,160,159,158,157,173,133,155,154,153,145,144,163,7] + [263,466,388,387,386,385,384,398,362,382,381,380,374,373,390,249] + [78,191,80,81,82,13,312,311,310,415,308,324,318,402,317,14,87,178,88,95] + [61,185,40,39,37,0,267,269,270,409,291,375,321,405,314,17,84,181,91,146] + [10,338,297,332,284,251,389,356,454,323,361,288,397,365,379,378,400,377,152,148,176,149,150,136,172,58,132,93,234,127,162,21,54,103,67,109] + [64,4,294]
13
+
14
+ # face alignment lm68
15
+ index_lm68_from_lm478 = [127,234,93,132,58,136,150,176,152,400,379,365,288,361,323,454,356,70,63,105,66,107,336,296,334,293,300,168,197,5,4,75,97,2,326,305,
16
+ 33,160,158,133,153,144,362,385,387,263,373,380,61,40,37,0,267,270,291,321,314,17,84,91,78,81,13,311,308,402,14,178]
17
+ # used for weights for key parts
18
+ unmatch_mask_from_lm478 = [ 93, 127, 132, 234, 323, 356, 361, 454]
19
+ index_eye_from_lm478 = [33,246,161,160,159,158,157,173,133,155,154,153,145,144,163,7] + [263,466,388,387,386,385,384,398,362,382,381,380,374,373,390,249]
20
+ index_innerlip_from_lm478 = [78,191,80,81,82,13,312,311,310,415,308,324,318,402,317,14,87,178,88,95]
21
+ index_outerlip_from_lm478 = [61,185,40,39,37,0,267,269,270,409,291,375,321,405,314,17,84,181,91,146]
22
+ index_withinmouth_from_lm478 = [76, 62] + [184, 183, 74, 72, 73, 41, 72, 38, 11, 12, 302, 268, 303, 271, 304, 272, 408, 407] + [292, 306] + [325, 307, 319, 320, 403, 404, 316, 315, 15, 16, 86, 85, 179, 180, 89, 90, 96, 77]
23
+ index_mouth_from_lm478 = index_innerlip_from_lm478 + index_outerlip_from_lm478 + index_withinmouth_from_lm478
24
+
25
+ index_yaw_from_lm68 = list(range(0, 17))
26
+ index_brow_from_lm68 = list(range(17, 27))
27
+ index_nose_from_lm68 = list(range(27, 36))
28
+ index_eye_from_lm68 = list(range(36, 48))
29
+ index_mouth_from_lm68 = list(range(48, 68))
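+
+ # e.g., a [478, 2] mediapipe landmark array reduces to the 68-point
+ # face-alignment convention by fancy indexing: lm68 = lm478[index_lm68_from_lm478]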
30
+
31
+
32
+ def read_video_to_frames(video_name):
33
+ frames = []
34
+ cap = cv2.VideoCapture(video_name)
35
+ while cap.isOpened():
36
+ ret, frame_bgr = cap.read()
37
+ if frame_bgr is None:
38
+ break
39
+ frames.append(frame_bgr)
40
+ frames = np.stack(frames)
41
+ frames = np.flip(frames, -1) # BGR ==> RGB
42
+ return frames
43
+
44
+ class MediapipeLandmarker:
45
+ def __init__(self):
46
+ model_path = 'data_gen/utils/mp_feature_extractors/face_landmarker.task'
47
+ if not os.path.exists(model_path):
48
+ os.makedirs(os.path.dirname(model_path), exist_ok=True)
49
+ print("downloading face_landmarker model from mediapipe...")
50
+ model_url = 'https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/latest/face_landmarker.task'
51
+ os.system(f"wget {model_url}")
52
+ os.system(f"mv face_landmarker.task {model_path}")
53
+ print("download success")
54
+ base_options = python.BaseOptions(model_asset_path=model_path)
55
+ self.image_mode_options = vision.FaceLandmarkerOptions(base_options=base_options,
56
+ running_mode=vision.RunningMode.IMAGE, # IMAGE, VIDEO, LIVE_STREAM
57
+ num_faces=1)
58
+ self.video_mode_options = vision.FaceLandmarkerOptions(base_options=base_options,
59
+ running_mode=vision.RunningMode.VIDEO, # IMAGE, VIDEO, LIVE_STREAM
60
+ num_faces=1)
61
+
62
+ def extract_lm478_from_img_name(self, img_name):
63
+ img = cv2.imread(img_name)
64
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
65
+ img_lm478 = self.extract_lm478_from_img(img)
66
+ return img_lm478
67
+
68
+ def extract_lm478_from_img(self, img):
69
+ img_landmarker = vision.FaceLandmarker.create_from_options(self.image_mode_options)
70
+ frame = mp.Image(image_format=mp.ImageFormat.SRGB, data=img.astype(np.uint8))
71
+ img_face_landmarker_result = img_landmarker.detect(image=frame)
72
+ img_ldm_i = img_face_landmarker_result.face_landmarks[0]
73
+ img_face_landmarks = np.array([[l.x, l.y, l.z] for l in img_ldm_i])
74
+ H, W, _ = img.shape
75
+ img_lm478 = np.array(img_face_landmarks)[:, :2] * np.array([W, H]).reshape([1,2]) # [478, 2]
76
+ return img_lm478
77
+
78
+ def extract_lm478_from_video_name(self, video_name, fps=25, anti_smooth_factor=2):
79
+ frames = read_video_to_frames(video_name)
80
+ img_lm478, vid_lm478 = self.extract_lm478_from_frames(frames, fps, anti_smooth_factor)
81
+ return img_lm478, vid_lm478
82
+
83
+ def extract_lm478_from_frames(self, frames, fps=25, anti_smooth_factor=20):
84
+ """
85
+ frames: RGB, uint8
86
+ anti_smooth_factor: float, scales the timestamp interval used in video mode; 1 means unchanged, larger values behave closer to image mode
87
+ """
88
+ img_mpldms = []
89
+ vid_mpldms = []
90
+ img_landmarker = vision.FaceLandmarker.create_from_options(self.image_mode_options)
91
+ vid_landmarker = vision.FaceLandmarker.create_from_options(self.video_mode_options)
92
+
93
+ for i in range(len(frames)):
94
+ frame = mp.Image(image_format=mp.ImageFormat.SRGB, data=frames[i].astype(np.uint8))
95
+ img_face_landmarker_result = img_landmarker.detect(image=frame)
96
+ vid_face_landmarker_result = vid_landmarker.detect_for_video(image=frame, timestamp_ms=int((1000/fps)*anti_smooth_factor*i))
97
+ try:
98
+ img_ldm_i = img_face_landmarker_result.face_landmarks[0]
99
+ vid_ldm_i = vid_face_landmarker_result.face_landmarks[0]
100
+ except IndexError: # no face detected in this frame; reuse the previous frame's landmarks
101
+ print(f"Warning: failed to detect landmarks at idx={i}, reusing the previous frame's results.")
102
+ img_face_landmarks = np.array([[l.x, l.y, l.z] for l in img_ldm_i])
103
+ vid_face_landmarks = np.array([[l.x, l.y, l.z] for l in vid_ldm_i])
104
+ img_mpldms.append(img_face_landmarks)
105
+ vid_mpldms.append(vid_face_landmarks)
106
+ img_lm478 = np.stack(img_mpldms)[..., :2]
107
+ vid_lm478 = np.stack(vid_mpldms)[..., :2]
108
+ bs, H, W, _ = frames.shape
109
+ img_lm478 = np.array(img_lm478)[..., :2] * np.array([W, H]).reshape([1,1,2]) # [T, 478, 2]
110
+ vid_lm478 = np.array(vid_lm478)[..., :2] * np.array([W, H]).reshape([1,1,2]) # [T, 478, 2]
111
+ return img_lm478, vid_lm478
112
+
113
+ def combine_vid_img_lm478_to_lm68(self, img_lm478, vid_lm478):
114
+ img_lm68 = img_lm478[:, index_lm68_from_lm478]
115
+ vid_lm68 = vid_lm478[:, index_lm68_from_lm478]
116
+ combined_lm68 = copy.deepcopy(img_lm68)
117
+ combined_lm68[:, index_yaw_from_lm68] = vid_lm68[:, index_yaw_from_lm68]
118
+ combined_lm68[:, index_brow_from_lm68] = vid_lm68[:, index_brow_from_lm68]
119
+ combined_lm68[:, index_nose_from_lm68] = vid_lm68[:, index_nose_from_lm68]
120
+ return combined_lm68
121
+
122
+ def combine_vid_img_lm478_to_lm478(self, img_lm478, vid_lm478):
123
+ combined_lm478 = copy.deepcopy(vid_lm478)
124
+ combined_lm478[:, index_mouth_from_lm478] = img_lm478[:, index_mouth_from_lm478]
125
+ combined_lm478[:, index_eye_from_lm478] = img_lm478[:, index_eye_from_lm478]
126
+ return combined_lm478
127
+
128
+ if __name__ == '__main__':
129
+ landmarker = MediapipeLandmarker()
130
+ ret = landmarker.extract_lm478_from_video_name("00000.mp4")
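+ # e.g., fuse the per-frame (image-mode) and temporally smoothed (video-mode) results:
+ # img_lm478, vid_lm478 = ret
+ # lm68 = landmarker.combine_vid_img_lm478_to_lm68(img_lm478, vid_lm478)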
data_gen/utils/mp_feature_extractors/face_landmarker.task ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64184e229b263107bc2b804c6625db1341ff2bb731874b0bcc2fe6544e0bc9ff
3
+ size 3758596
data_gen/utils/mp_feature_extractors/mp_segmenter.py ADDED
@@ -0,0 +1,303 @@
1
+ import os
2
+ import copy
+ import cv2  # used by read_video_frame below
3
+ import numpy as np
4
+ import tqdm
5
+ import mediapipe as mp
6
+ import torch
7
+ from mediapipe.tasks import python
8
+ from mediapipe.tasks.python import vision
9
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm, multiprocess_run
10
+ from utils.commons.tensor_utils import convert_to_np
11
+ from sklearn.neighbors import NearestNeighbors
12
+
13
+ def scatter_np(condition_img, classSeg=5):
14
+ # def scatter(condition_img, classSeg=19, label_size=(512, 512)):
15
+ batch, c, height, width = condition_img.shape
16
+ # if height != label_size[0] or width != label_size[1]:
17
+ # condition_img= F.interpolate(condition_img, size=label_size, mode='nearest')
18
+ input_label = np.zeros([batch, classSeg, condition_img.shape[2], condition_img.shape[3]]).astype(np.int_)
19
+ # input_label = torch.zeros(batch, classSeg, *label_size, device=condition_img.device)
20
+ np.put_along_axis(input_label, condition_img, 1, 1)
21
+ return input_label
22
+
23
+ def scatter(condition_img, classSeg=19):
24
+ # def scatter(condition_img, classSeg=19, label_size=(512, 512)):
25
+ batch, c, height, width = condition_img.size()
26
+ # if height != label_size[0] or width != label_size[1]:
27
+ # condition_img= F.interpolate(condition_img, size=label_size, mode='nearest')
28
+ input_label = torch.zeros(batch, classSeg, condition_img.shape[2], condition_img.shape[3], device=condition_img.device)
29
+ # input_label = torch.zeros(batch, classSeg, *label_size, device=condition_img.device)
30
+ return input_label.scatter_(1, condition_img.long(), 1)
31
+
32
+ def encode_segmap_mask_to_image(segmap):
33
+ # rgb
34
+ _,h,w = segmap.shape
35
+ encoded_img = np.ones([h,w,3],dtype=np.uint8) * 255
36
+ colors = [(255,255,255),(255,255,0),(255,0,255),(0,255,255),(255,0,0),(0,255,0)]
37
+ for i, color in enumerate(colors):
38
+ mask = segmap[i].astype(int)
39
+ index = np.where(mask != 0)
40
+ encoded_img[index[0], index[1], :] = np.array(color)
41
+ return encoded_img.astype(np.uint8)
42
+
43
+ def decode_segmap_mask_from_image(encoded_img):
44
+ # rgb
45
+ colors = [(255,255,255),(255,255,0),(255,0,255),(0,255,255),(255,0,0),(0,255,0)]
46
+ bg = (encoded_img[..., 0] == 255) & (encoded_img[..., 1] == 255) & (encoded_img[..., 2] == 255)
47
+ hair = (encoded_img[..., 0] == 255) & (encoded_img[..., 1] == 255) & (encoded_img[..., 2] == 0)
48
+ body_skin = (encoded_img[..., 0] == 255) & (encoded_img[..., 1] == 0) & (encoded_img[..., 2] == 255)
49
+ face_skin = (encoded_img[..., 0] == 0) & (encoded_img[..., 1] == 255) & (encoded_img[..., 2] == 255)
50
+ clothes = (encoded_img[..., 0] == 255) & (encoded_img[..., 1] == 0) & (encoded_img[..., 2] == 0)
51
+ others = (encoded_img[..., 0] == 0) & (encoded_img[..., 1] == 255) & (encoded_img[..., 2] == 0)
52
+ segmap = np.stack([bg, hair, body_skin, face_skin, clothes, others], axis=0)
53
+ return segmap.astype(np.uint8)
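+
+ # encode/decode are inverses over the 6 one-hot channels, e.g.:
+ # img = encode_segmap_mask_to_image(segmap)
+ # assert (decode_segmap_mask_from_image(img) == segmap).all()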
54
+
55
+ def read_video_frame(video_name, frame_id):
56
+ # https://blog.csdn.net/bby1987/article/details/108923361
57
+ # frame_num = video_capture.get(cv2.CAP_PROP_FRAME_COUNT) # ==> total frame count
58
+ # fps = video_capture.get(cv2.CAP_PROP_FPS) # ==> frame rate
59
+ # width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH) # ==> video width
60
+ # height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT) # ==> video height
61
+ # pos = video_capture.get(cv2.CAP_PROP_POS_FRAMES) # ==> current frame position
62
+ # video_capture.set(cv2.CAP_PROP_POS_FRAMES, 1000) # ==> seek to a frame position
63
+ # pos = video_capture.get(cv2.CAP_PROP_POS_FRAMES) # ==> now pos = 1000.0
64
+ # video_capture.release()
65
+ vr = cv2.VideoCapture(video_name)
66
+ vr.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
67
+ _, frame = vr.read()
68
+ return frame
69
+
70
+ def decode_segmap_mask_from_segmap_video_frame(video_frame):
71
+ # video_frame: 0~255 BGR, obtained by read_video_frame
72
+ def assign_values(array):
73
+ remainder = array % 40 # remainder of each value modulo 40
74
+ assigned_values = np.where(remainder <= 20, array - remainder, array + (40 - remainder))
75
+ return assigned_values
76
+ segmap = video_frame.mean(-1)
77
+ segmap = assign_values(segmap) // 40 # [H, W] with value 0~5
78
+ segmap_mask = scatter_np(segmap[None, None, ...], classSeg=6)[0] # [6, H, W]
79
+ return segmap_mask.astype(np.uint8)
80
+
81
+ def extract_background(img_lst, segmap_lst=None):
82
+ """
83
+ img_lst: list of rgb ndarray
84
+ """
85
+ # only use 1/20 images
86
+ num_frames = len(img_lst)
87
+ img_lst = img_lst[::20] if num_frames > 20 else img_lst[0:1]
88
+
89
+ if segmap_lst is not None:
90
+ segmap_lst = segmap_lst[::20] if num_frames > 20 else segmap_lst[0:1]
91
+ assert len(img_lst) == len(segmap_lst)
92
+ # get H/W
93
+ h, w = img_lst[0].shape[:2]
94
+
95
+ # nearest neighbors
96
+ all_xys = np.mgrid[0:h, 0:w].reshape(2, -1).transpose()
97
+ distss = []
98
+ for idx, img in enumerate(img_lst):
99
+ if segmap_lst is not None:
100
+ segmap = segmap_lst[idx]
101
+ else:
102
+ segmap = seg_model._cal_seg_map(img)
103
+ bg = (segmap[0]).astype(bool)
104
+ fg_xys = np.stack(np.nonzero(~bg)).transpose(1, 0)
105
+ nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(fg_xys)
106
+ dists, _ = nbrs.kneighbors(all_xys)
107
+ distss.append(dists)
108
+
109
+ distss = np.stack(distss)
110
+ max_dist = np.max(distss, 0)
111
+ max_id = np.argmax(distss, 0)
112
+
113
+ bc_pixs = max_dist > 10 # 5
114
+ bc_pixs_id = np.nonzero(bc_pixs)
115
+ bc_ids = max_id[bc_pixs]
116
+
117
+ num_pixs = distss.shape[1]
118
+ imgs = np.stack(img_lst).reshape(-1, num_pixs, 3)
119
+
120
+ bg_img = np.zeros((h*w, 3), dtype=np.uint8)
121
+ bg_img[bc_pixs_id, :] = imgs[bc_ids, bc_pixs_id, :]
122
+ bg_img = bg_img.reshape(h, w, 3)
123
+
124
+ max_dist = max_dist.reshape(h, w)
125
+ bc_pixs = max_dist > 10 # 5
126
+ bg_xys = np.stack(np.nonzero(~bc_pixs)).transpose()
127
+ fg_xys = np.stack(np.nonzero(bc_pixs)).transpose()
128
+ nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(fg_xys)
129
+ distances, indices = nbrs.kneighbors(bg_xys)
130
+ bg_fg_xys = fg_xys[indices[:, 0]]
131
+ bg_img[bg_xys[:, 0], bg_xys[:, 1], :] = bg_img[bg_fg_xys[:, 0], bg_fg_xys[:, 1], :]
132
+ return bg_img
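+
+ # Usage sketch: given a list of RGB frames (and optionally their [6, H, W] segmaps;
+ # without segmaps a module-level seg_model must exist), pixels that lie far from any
+ # foreground in at least one sampled frame are kept as background, and remaining
+ # holes are filled from their nearest recovered background pixel:
+ # bg = extract_background(frames_rgb, segmaps)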
133
+
134
+
135
+ global_segmenter = None
136
+ def job_cal_seg_map_for_image(img, segmenter_options=None, segmenter=None):
137
+ """
138
+ Used by MediapipeSegmenter.multiprocess_cal_seg_map_for_a_video; dedicated to processing a single long video.
139
+ """
140
+ global global_segmenter
141
+ if segmenter is not None:
142
+ segmenter_actual = segmenter
143
+ else:
144
+ global_segmenter = vision.ImageSegmenter.create_from_options(segmenter_options) if global_segmenter is None else global_segmenter
145
+ segmenter_actual = global_segmenter
146
+ mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=img)
147
+ out = segmenter_actual.segment(mp_image)
148
+ segmap = out.category_mask.numpy_view().copy() # [H, W]
149
+
150
+ segmap_mask = scatter_np(segmap[None, None, ...], classSeg=6)[0] # [6, H, W]
151
+ segmap_image = segmap[:, :, None].repeat(3, 2).astype(float)
152
+ segmap_image = (segmap_image * 40).astype(np.uint8)
153
+
154
+ return segmap_mask, segmap_image
155
+
156
+ class MediapipeSegmenter:
157
+ def __init__(self):
158
+ model_path = 'data_gen/utils/mp_feature_extractors/selfie_multiclass_256x256.tflite'
159
+ if not os.path.exists(model_path):
160
+ os.makedirs(os.path.dirname(model_path), exist_ok=True)
161
+ print("downloading segmenter model from mediapipe...")
162
+ os.system(f"wget https://storage.googleapis.com/mediapipe-models/image_segmenter/selfie_multiclass_256x256/float32/latest/selfie_multiclass_256x256.tflite")
163
+ os.system(f"mv selfie_multiclass_256x256.tflite {model_path}")
164
+ print("download success")
165
+ base_options = python.BaseOptions(model_asset_path=model_path)
166
+ self.options = vision.ImageSegmenterOptions(base_options=base_options,running_mode=vision.RunningMode.IMAGE, output_category_mask=True)
167
+ self.video_options = vision.ImageSegmenterOptions(base_options=base_options,running_mode=vision.RunningMode.VIDEO, output_category_mask=True)
168
+
169
+ def multiprocess_cal_seg_map_for_a_video(self, imgs, num_workers=4):
170
+ """
171
+ Process a single long video in parallel.
172
+ imgs: list of rgb array in 0~255
173
+ """
174
+ segmap_masks = []
175
+ segmap_images = []
176
+ img_lst = [(imgs[i], self.options) for i in range(len(imgs))] # job signature is (img, segmenter_options)
177
+ for (i, res) in multiprocess_run_tqdm(job_cal_seg_map_for_image, args=img_lst, num_workers=num_workers, desc='extracting from a video in multi-process'):
178
+ segmap_mask, segmap_image = res
179
+ segmap_masks.append(segmap_mask)
180
+ segmap_images.append(segmap_image)
181
+ return segmap_masks, segmap_images
182
+
183
+ def _cal_seg_map_for_video(self, imgs, segmenter=None, return_onehot_mask=True, return_segmap_image=True):
184
+ segmenter = vision.ImageSegmenter.create_from_options(self.video_options) if segmenter is None else segmenter
185
+ assert return_onehot_mask or return_segmap_image # you should at least return one
186
+ segmap_masks = []
187
+ segmap_images = []
188
+ for i in tqdm.trange(len(imgs), desc="extracting segmaps from a video..."):
189
+ img = imgs[i]
190
+ mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=img)
191
+ out = segmenter.segment_for_video(mp_image, 40 * i)
192
+ segmap = out.category_mask.numpy_view().copy() # [H, W]
193
+
194
+ if return_onehot_mask:
195
+ segmap_mask = scatter_np(segmap[None, None, ...], classSeg=6)[0] # [6, H, W]
196
+ segmap_masks.append(segmap_mask)
197
+ if return_segmap_image:
198
+ segmap_image = segmap[:, :, None].repeat(3, 2).astype(float)
199
+ segmap_image = (segmap_image * 40).astype(np.uint8)
200
+ segmap_images.append(segmap_image)
201
+
202
+ if return_onehot_mask and return_segmap_image:
203
+ return segmap_masks, segmap_images
204
+ elif return_onehot_mask:
205
+ return segmap_masks
206
+ elif return_segmap_image:
207
+ return segmap_images
208
+
209
+ def _cal_seg_map(self, img, segmenter=None, return_onehot_mask=True):
210
+ """
211
+ segmenter: vision.ImageSegmenter.create_from_options(options)
212
+ img: numpy, [H, W, 3], 0~255
213
+ segmap: [C, H, W]
214
+ 0 - background
215
+ 1 - hair
216
+ 2 - body-skin
217
+ 3 - face-skin
218
+ 4 - clothes
219
+ 5 - others (accessories)
220
+ """
221
+ assert img.ndim == 3
222
+ segmenter = vision.ImageSegmenter.create_from_options(self.options) if segmenter is None else segmenter
223
+ image = mp.Image(image_format=mp.ImageFormat.SRGB, data=img)
224
+ out = segmenter.segment(image)
225
+ segmap = out.category_mask.numpy_view().copy() # [H, W]
226
+ if return_onehot_mask:
227
+ segmap = scatter_np(segmap[None, None, ...], classSeg=6)[0] # [6, H, W]
228
+ return segmap
229
+
230
+ def _seg_out_img_with_segmap(self, img, segmap, mode='head'):
231
+ """
232
+ img: [h,w,c], img is in 0~255, np
233
+ """
234
+ #
235
+ img = copy.deepcopy(img)
236
+ if mode == 'head':
237
+ selected_mask = segmap[[1,3,5] , :, :].sum(axis=0)[None,:] > 0.5 # glasses also count as 'others'
238
+ img[~selected_mask.repeat(3,axis=0).transpose(1,2,0)] = 0 # (-1,-1,-1) denotes black in our [-1,1] convention
239
+ # selected_mask = segmap[[1,3] , :, :].sum(dim=0, keepdim=True) > 0.5
240
+ elif mode == 'person':
241
+ selected_mask = segmap[[1,2,3,4,5], :, :].sum(axis=0)[None,:] > 0.5
242
+ img[~selected_mask.repeat(3,axis=0).transpose(1,2,0)] = 0 # (-1,-1,-1) denotes black in our [-1,1] convention
243
+ elif mode == 'torso':
244
+ selected_mask = segmap[[2,4], :, :].sum(axis=0)[None,:] > 0.5
245
+ img[~selected_mask.repeat(3,axis=0).transpose(1,2,0)] = 0 # (-1,-1,-1) denotes black in our [-1,1] convention
246
+ elif mode == 'torso_with_bg':
247
+ selected_mask = segmap[[0, 2,4], :, :].sum(axis=0)[None,:] > 0.5
248
+ img[~selected_mask.repeat(3,axis=0).transpose(1,2,0)] = 0 # (-1,-1,-1) denotes black in our [-1,1] convention
249
+ elif mode == 'bg':
250
+ selected_mask = segmap[[0], :, :].sum(axis=0)[None,:] > 0.5 # only seg out 0, which means background
251
+ img[~selected_mask.repeat(3,axis=0).transpose(1,2,0)] = 0 # (-1,-1,-1) denotes black in our [-1,1] convention
252
+ elif mode == 'full':
253
+ pass
254
+ else:
255
+ raise NotImplementedError()
256
+ return img, selected_mask
257
+
258
+ def _seg_out_img(self, img, segmenter=None, mode='head'):
259
+ """
260
+ imgs [H, W, 3] 0-255
261
+ return : person_img [B, 3, H, W]
262
+ """
263
+ segmenter = vision.ImageSegmenter.create_from_options(self.options) if segmenter is None else segmenter
264
+ segmap = self._cal_seg_map(img, segmenter=segmenter, return_onehot_mask=True) # [6, H, W]
265
+ return self._seg_out_img_with_segmap(img, segmap, mode=mode)
266
+
267
+ def seg_out_imgs(self, img, mode='head'):
268
+ """
269
+ api for pytorch img, -1~1
270
+ img: [B, 3, H, W], -1~1
271
+ """
272
+ device = img.device
273
+ img = convert_to_np(img.permute(0, 2, 3, 1)) # [B, H, W, 3]
274
+ img = ((img + 1) * 127.5).astype(np.uint8)
275
+ img_lst = [copy.deepcopy(img[i]) for i in range(len(img))]
276
+ out_lst = []
277
+ for im in img_lst:
278
+ out, _ = self._seg_out_img(im, mode=mode) # _seg_out_img returns (img, mask); keep only the img
279
+ out_lst.append(out)
280
+ seg_imgs = np.stack(out_lst) # [B, H, W, 3]
281
+ seg_imgs = (seg_imgs - 127.5) / 127.5
282
+ seg_imgs = torch.from_numpy(seg_imgs).permute(0, 3, 1, 2).to(device)
283
+ return seg_imgs
284
+
285
+ if __name__ == '__main__':
286
+ import imageio, cv2, tqdm
287
+ import torchshow as ts
288
+ img = imageio.imread("1.png")
289
+ img = cv2.resize(img, (512,512))
290
+
291
+ seg_model = MediapipeSegmenter()
292
+ img = torch.tensor(img).unsqueeze(0).repeat([1, 1, 1, 1]).permute(0, 3,1,2)
293
+ img = (img-127.5)/127.5
294
+ out = seg_model.seg_out_imgs(img, 'torso')
295
+ ts.save(out,"torso.png")
296
+ out = seg_model.seg_out_imgs(img, 'head')
297
+ ts.save(out,"head.png")
298
+ out = seg_model.seg_out_imgs(img, 'bg')
299
+ ts.save(out,"bg.png")
300
+ img = convert_to_np(img.permute(0, 2, 3, 1)) # [B, H, W, 3]
301
+ img = ((img + 1) * 127.5).astype(np.uint8)
302
+ bg = extract_background(img)
303
+ ts.save(bg,"bg2.png")
data_gen/utils/mp_feature_extractors/selfie_multiclass_256x256.tflite ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6748b1253a99067ef71f7e26ca71096cd449baefa8f101900ea23016507e0e0
3
+ size 16371837
data_gen/utils/path_converter.py ADDED
@@ -0,0 +1,24 @@
1
+ import os
2
+
3
+
4
+ class PathConverter():
5
+ def __init__(self):
6
+ self.prefixs = {
7
+ "vid": "/video/",
8
+ "gt": "/gt_imgs/",
9
+ "head": "/head_imgs/",
10
+ "torso": "/torso_imgs/",
11
+ "person": "/person_imgs/",
12
+ "torso_with_bg": "/torso_with_bg_imgs/",
13
+ "single_bg": "/bg_img/",
14
+ "bg": "/bg_imgs/",
15
+ "segmaps": "/segmaps/",
16
+ "inpaint_torso": "/inpaint_torso_imgs/",
17
+ "com": "/com_imgs/",
18
+ "inpaint_torso_with_com_bg": "/inpaint_torso_with_com_bg_imgs/",
19
+ }
20
+
21
+ def to(self, path: str, old_pattern: str, new_pattern: str):
22
+ return path.replace(self.prefixs[old_pattern], self.prefixs[new_pattern], 1)
23
+
24
+ pc = PathConverter()
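+
+ # Usage sketch: swap one directory token for another, e.g.
+ # pc.to("data/raw/video/May.mp4", "vid", "gt") -> "data/raw/gt_imgs/May.mp4"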
data_gen/utils/process_audio/extract_hubert.py ADDED
@@ -0,0 +1,95 @@
1
+ from transformers import Wav2Vec2Processor, HubertModel
2
+ import soundfile as sf
3
+ import numpy as np
4
+ import torch
5
+ import os
6
+ from utils.commons.hparams import set_hparams, hparams
7
+
8
+
9
+ wav2vec2_processor = None
10
+ hubert_model = None
11
+
12
+
13
+ def get_hubert_from_16k_wav(wav_16k_name):
14
+ speech_16k, _ = sf.read(wav_16k_name)
15
+ hubert = get_hubert_from_16k_speech(speech_16k)
16
+ return hubert
17
+
18
+ @torch.no_grad()
19
+ def get_hubert_from_16k_speech(speech, device="cuda:0"):
20
+ global hubert_model, wav2vec2_processor
21
+ local_path = '/home/tiger/.cache/huggingface/hub/models--facebook--hubert-large-ls960-ft/snapshots/ece5fabbf034c1073acae96d5401b25be96709d8'
22
+ if hubert_model is None:
23
+ print("Loading the HuBERT Model...")
24
+ if os.path.exists(local_path):
25
+ hubert_model = HubertModel.from_pretrained(local_path)
26
+ else:
27
+ hubert_model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
28
+ hubert_model = hubert_model.to(device)
29
+ if wav2vec2_processor is None:
30
+ print("Loading the Wav2Vec2 Processor...")
31
+ if os.path.exists(local_path):
32
+ wav2vec2_processor = Wav2Vec2Processor.from_pretrained(local_path)
33
+ else:
34
+ wav2vec2_processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft")
35
+
36
+ if speech.ndim == 2:
37
+ speech = speech[:, 0] # [T, 2] ==> [T,]
38
+
39
+ input_values_all = wav2vec2_processor(speech, return_tensors="pt", sampling_rate=16000).input_values # [1, T]
40
+ input_values_all = input_values_all.to(device)
41
+ # For long audio sequence, due to the memory limitation, we cannot process them in one run
42
+ # HuBERT process the wav with a CNN of stride [5,2,2,2,2,2], making a stride of 320
43
+ # Besides, the kernel is [10,3,3,3,3,2,2], making 400 a fundamental unit to get 1 time step.
44
+ # So the CNN is euqal to a big Conv1D with kernel k=400 and stride s=320
45
+ # We have the equation to calculate out time step: T = floor((t-k)/s)
46
+ # To prevent overlap, we set each clip length of (K+S*(N-1)), where N is the expected length T of this clip
47
+ # The start point of next clip should roll back with a length of (kernel-stride) so it is stride * N
48
+ kernel = 400
49
+ stride = 320
50
+ clip_length = stride * 1000
51
+ num_iter = input_values_all.shape[1] // clip_length
52
+ expected_T = (input_values_all.shape[1] - (kernel-stride)) // stride
53
+ res_lst = []
54
+ for i in range(num_iter):
55
+ if i == 0:
56
+ start_idx = 0
57
+ end_idx = clip_length - stride + kernel
58
+ else:
59
+ start_idx = clip_length * i
60
+ end_idx = start_idx + (clip_length - stride + kernel)
61
+ input_values = input_values_all[:, start_idx: end_idx]
62
+ hidden_states = hubert_model.forward(input_values).last_hidden_state # [B=1, T=pts//320, hid=1024]
63
+ res_lst.append(hidden_states[0])
64
+ if num_iter > 0:
65
+ input_values = input_values_all[:, clip_length * num_iter:]
66
+ else:
67
+ input_values = input_values_all
68
+
69
+ if input_values.shape[1] >= kernel: # if the last batch is shorter than kernel_size, skip it
70
+ hidden_states = hubert_model(input_values).last_hidden_state # [B=1, T=pts//320, hid=1024]
71
+ res_lst.append(hidden_states[0])
72
+ ret = torch.cat(res_lst, dim=0).cpu() # [T, 1024]
73
+
74
+ assert abs(ret.shape[0] - expected_T) <= 1
75
+ if ret.shape[0] < expected_T: # if skipping the last short
76
+ ret = torch.cat([ret, ret[:, -1:, :].repeat([1,expected_T-ret.shape[0],1])], dim=1)
77
+ else:
78
+ ret = ret[:expected_T]
79
+
80
+ return ret
81
+
82
+
83
+ if __name__ == '__main__':
84
+ from argparse import ArgumentParser
85
+ parser = ArgumentParser()
86
+ parser.add_argument('--video_id', type=str, default='May', help='')
87
+ args = parser.parse_args()
88
+ ### Process Single Long Audio for NeRF dataset
89
+ person_id = args.video_id
90
+ wav_16k_name = f"data/processed/videos/{person_id}/aud.wav"
91
+ hubert_npy_name = f"data/processed/videos/{person_id}/aud_hubert.npy"
92
+ speech_16k, _ = sf.read(wav_16k_name)
93
+ hubert_hidden = get_hubert_from_16k_speech(speech_16k)
94
+ np.save(hubert_npy_name, hubert_hidden.detach().numpy())
95
+ print(f"Saved at {hubert_npy_name}")
data_gen/utils/process_audio/extract_mel_f0.py ADDED
@@ -0,0 +1,148 @@
1
+ import numpy as np
2
+ import torch
3
+ import glob
4
+ import os
5
+ import tqdm
6
+ import librosa
7
+ import parselmouth
8
+ from utils.commons.pitch_utils import f0_to_coarse
9
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
10
+ from utils.commons.os_utils import multiprocess_glob
11
+ from utils.audio.io import save_wav
12
+
13
+ from moviepy.editor import VideoFileClip
14
+ from utils.commons.hparams import hparams, set_hparams
15
+
16
+ def resample_wav(wav_name, out_name, sr=16000):
17
+ wav_raw, sr = librosa.core.load(wav_name, sr=sr)
18
+ save_wav(wav_raw, out_name, sr)
19
+
20
+ def split_wav(mp4_name, wav_name=None):
21
+ if wav_name is None:
22
+ wav_name = mp4_name.replace(".mp4", ".wav").replace("/video/", "/audio/")
23
+ if os.path.exists(wav_name):
24
+ return wav_name
25
+ os.makedirs(os.path.dirname(wav_name), exist_ok=True)
26
+
27
+ video = VideoFileClip(mp4_name,verbose=False)
28
+ dur = video.duration
29
+ audio = video.audio
30
+ assert audio is not None
31
+ audio.write_audiofile(wav_name,fps=16000,verbose=False,logger=None)
32
+ return wav_name
33
+
34
+ def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
35
+ '''compute right padding (final frame) or both sides padding (first and final frames)
36
+ '''
37
+ assert pad_sides in (1, 2)
38
+ # return int(fsize // 2)
39
+ pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
40
+ if pad_sides == 1:
41
+ return 0, pad
42
+ else:
43
+ return pad // 2, pad // 2 + pad % 2
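+
+ # (Worked example: x of 16000 samples with fshift=320 gives
+ # pad = (16000 // 320 + 1) * 320 - 16000 = 320 right-padded samples.)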
44
+
45
+ def extract_mel_from_fname(wav_path,
46
+ fft_size=512,
47
+ hop_size=320,
48
+ win_length=512,
49
+ window="hann",
50
+ num_mels=80,
51
+ fmin=80,
52
+ fmax=7600,
53
+ eps=1e-6,
54
+ sample_rate=16000,
55
+ min_level_db=-100):
56
+ if isinstance(wav_path, str):
57
+ wav, _ = librosa.core.load(wav_path, sr=sample_rate)
58
+ else:
59
+ wav = wav_path
60
+
61
+ # get amplitude spectrogram
62
+ x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size,
63
+ win_length=win_length, window=window, center=False)
64
+ spc = np.abs(x_stft) # (n_bins, T)
65
+
66
+ # get mel basis
67
+ fmin = 0 if fmin == -1 else fmin
68
+ fmax = sample_rate / 2 if fmax == -1 else fmax
69
+ mel_basis = librosa.filters.mel(sr=sample_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax)
70
+ mel = mel_basis @ spc
71
+
72
+ mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T)
73
+ mel = mel.T
74
+
75
+ l_pad, r_pad = librosa_pad_lr(wav, fft_size, hop_size, 1)
76
+ wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0)
77
+
78
+ return wav.T, mel
79
+
80
+ def extract_f0_from_wav_and_mel(wav, mel,
81
+ hop_size=320,
82
+ audio_sample_rate=16000,
83
+ ):
84
+ time_step = hop_size / audio_sample_rate * 1000
85
+ f0_min = 80
86
+ f0_max = 750
87
+ f0 = parselmouth.Sound(wav, audio_sample_rate).to_pitch_ac(
88
+ time_step=time_step / 1000, voicing_threshold=0.6,
89
+ pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
90
+
91
+ delta_l = len(mel) - len(f0)
92
+ assert np.abs(delta_l) <= 8
93
+ if delta_l > 0:
94
+ f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0)
95
+ f0 = f0[:len(mel)]
96
+ pitch_coarse = f0_to_coarse(f0)
97
+ return f0, pitch_coarse
98
+
99
+
100
+ def extract_mel_f0_from_fname(wav_name=None, out_name=None):
101
+ try:
102
+ if out_name is None:
+     out_name = wav_name.replace(".wav", "_mel_f0.npy").replace("/audio/", "/mel_f0/")
103
+ os.makedirs(os.path.dirname(out_name), exist_ok=True)
104
+
105
+ wav, mel = extract_mel_from_fname(wav_name)
106
+ f0, f0_coarse = extract_f0_from_wav_and_mel(wav, mel)
107
+ out_dict = {
108
+ "mel": mel, # [T, 80]
109
+ "f0": f0,
110
+ }
111
+ np.save(out_name, out_dict)
112
+ except Exception as e:
113
+ print(e)
114
+
115
+ def extract_mel_f0_from_video_name(mp4_name, wav_name=None, out_name=None):
116
+ if mp4_name.endswith(".mp4"):
117
+ wav_name = split_wav(mp4_name, wav_name)
118
+ if out_name is None:
119
+ out_name = mp4_name.replace(".mp4", "_mel_f0.npy").replace("/video/", "/mel_f0/")
120
+ elif mp4_name.endswith(".wav"):
121
+ wav_name = mp4_name
122
+ if out_name is None:
123
+ out_name = mp4_name.replace(".wav", "_mel_f0.npy").replace("/audio/", "/mel_f0/")
124
+
125
+ os.makedirs(os.path.dirname(out_name), exist_ok=True)
126
+
127
+ wav, mel = extract_mel_from_fname(wav_name)
128
+
129
+ f0, f0_coarse = extract_f0_from_wav_and_mel(wav, mel)
130
+ out_dict = {
131
+ "mel": mel, # [T, 80]
132
+ "f0": f0,
133
+ }
134
+ np.save(out_name, out_dict)
135
+
136
+
137
+ if __name__ == '__main__':
138
+ from argparse import ArgumentParser
139
+ parser = ArgumentParser()
140
+ parser.add_argument('--video_id', type=str, default='May', help='')
141
+ args = parser.parse_args()
142
+ ### Process Single Long Audio for NeRF dataset
143
+ person_id = args.video_id
144
+
145
+ wav_16k_name = f"data/processed/videos/{person_id}/aud.wav"
146
+ out_name = f"data/processed/videos/{person_id}/aud_mel_f0.npy"
147
+ extract_mel_f0_from_video_name(wav_16k_name, out_name=out_name)
148
+ print(f"Saved at {out_name}")
data_gen/utils/process_audio/resample_audio_to_16k.py ADDED
@@ -0,0 +1,49 @@
1
+ import os, glob
2
+ from utils.commons.os_utils import multiprocess_glob
3
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
4
+
5
+
6
+ def extract_wav16k_job(audio_name:str):
7
+ out_path = audio_name.replace("/audio_raw/","/audio/",1)
8
+ assert out_path != audio_name # prevent inplace
9
+ os.makedirs(os.path.dirname(out_path), exist_ok=True)
10
+ ffmpeg_path = "/usr/bin/ffmpeg"
11
+
12
+ cmd = f'{ffmpeg_path} -i {audio_name} -ar 16000 -v quiet -y {out_path}'
13
+ os.system(cmd)
14
+
15
+ if __name__ == '__main__':
16
+ import argparse, glob, tqdm, random
17
+ parser = argparse.ArgumentParser()
18
+ parser.add_argument("--aud_dir", default='/home/tiger/datasets/raw/CMLR/audio_raw/')
19
+ parser.add_argument("--ds_name", default='CMLR')
20
+ parser.add_argument("--num_workers", default=64, type=int)
21
+ parser.add_argument("--process_id", default=0, type=int)
22
+ parser.add_argument("--total_process", default=1, type=int)
23
+ args = parser.parse_args()
24
+ print(f"args {args}")
25
+
26
+ aud_dir = args.aud_dir
27
+ ds_name = args.ds_name
28
+ if ds_name in ['CMLR']:
29
+ aud_name_pattern = os.path.join(aud_dir, "*/*/*.wav")
30
+ aud_names = multiprocess_glob(aud_name_pattern)
31
+ else:
32
+ raise NotImplementedError()
33
+ aud_names = sorted(aud_names)
34
+ print(f"total audio number : {len(aud_names)}")
35
+ print(f"first {aud_names[0]} last {aud_names[-1]}")
36
+ # exit()
37
+ process_id = args.process_id
38
+ total_process = args.total_process
39
+ if total_process > 1:
40
+ assert process_id <= total_process -1
41
+ num_samples_per_process = len(aud_names) // total_process
42
+ if process_id == total_process - 1: # the last process takes the remainder
43
+ aud_names = aud_names[process_id * num_samples_per_process : ]
44
+ else:
45
+ aud_names = aud_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
46
+
47
+ for i, res in multiprocess_run_tqdm(extract_wav16k_job, aud_names, num_workers=args.num_workers, desc="resampling videos"):
48
+ pass
49
+
data_gen/utils/process_image/extract_lm2d.py ADDED
@@ -0,0 +1,197 @@
1
+ import os
2
+ os.environ["OMP_NUM_THREADS"] = "1"
3
+ import sys
4
+
5
+ import glob
6
+ import cv2
7
+ import tqdm
8
+ import numpy as np
9
+ from data_gen.utils.mp_feature_extractors.face_landmarker import MediapipeLandmarker
10
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
11
+ import warnings
12
+ warnings.filterwarnings('ignore')
13
+
14
+ import random
15
+ random.seed(42)
16
+
17
+ import pickle
18
+ import json
19
+ import gzip
20
+ from typing import Any
21
+
22
+ def load_file(filename, is_gzip: bool = False, is_json: bool = False) -> Any:
23
+ if is_json:
24
+ if is_gzip:
25
+ with gzip.open(filename, "r", encoding="utf-8") as f:
26
+ loaded_object = json.load(f)
27
+ return loaded_object
28
+ else:
29
+ with open(filename, "r", encoding="utf-8") as f:
30
+ loaded_object = json.load(f)
31
+ return loaded_object
32
+ else:
33
+ if is_gzip:
34
+ with gzip.open(filename, "rb") as f:
35
+ loaded_object = pickle.load(f)
36
+ return loaded_object
37
+ else:
38
+ with open(filename, "rb") as f:
39
+ loaded_object = pickle.load(f)
40
+ return loaded_object
41
+
42
+ def save_file(filename, content, is_gzip: bool = False, is_json: bool = False) -> None:
43
+ if is_json:
44
+ if is_gzip:
45
+ with gzip.open(filename, "w", encoding="utf-8") as f:
46
+ json.dump(content, f)
47
+ else:
48
+ with open(filename, "w", encoding="utf-8") as f:
49
+ json.dump(content, f)
50
+ else:
51
+ if is_gzip:
52
+ with gzip.open(filename, "wb") as f:
53
+ pickle.dump(content, f)
54
+ else:
55
+ with open(filename, "wb") as f:
56
+ pickle.dump(content, f)
57
+
58
+ face_landmarker = None
59
+
60
+ def extract_lms_mediapipe_job(img):
61
+ if img is None:
62
+ return None
63
+ global face_landmarker
64
+ if face_landmarker is None:
65
+ face_landmarker = MediapipeLandmarker()
66
+ lm478 = face_landmarker.extract_lm478_from_img(img)
67
+ return lm478
68
+
69
+ def extract_landmark_job(img_name):
70
+ try:
71
74
+ out_name = img_name.replace("/images_512/", "/lms_2d/").replace(".png","_lms.npy")
75
+ if os.path.exists(out_name):
76
+ print("out exists, skip...")
77
+ return
78
+ try:
79
+ os.makedirs(os.path.dirname(out_name), exist_ok=True)
80
+ except:
81
+ pass
82
+ img = cv2.imread(img_name)[:,:,::-1]
83
+
84
+ if img is not None:
85
+ lm478 = extract_lms_mediapipe_job(img)
86
+ if lm478 is not None:
87
+ np.save(out_name, lm478)
88
+ # print("Hahaha, solve one item!!!")
89
+ except Exception as e:
90
+ print(e)
91
+ pass
92
+
93
+ def out_exist_job(img_name):
94
+ out_name = img_name.replace("/images_512/", "/lms_2d/").replace(".png","_lms.npy")
95
+ if os.path.exists(out_name):
96
+ return None
97
+ else:
98
+ return img_name
99
+
100
+ # def get_todo_img_names(img_names):
101
+ # todo_img_names = []
102
+ # for i, res in multiprocess_run_tqdm(out_exist_job, img_names, num_workers=64):
103
+ # if res is not None:
104
+ # todo_img_names.append(res)
105
+ # return todo_img_names
106
+
107
+
108
+ if __name__ == '__main__':
109
+ import argparse, glob, tqdm, random
110
+ parser = argparse.ArgumentParser()
111
+ parser.add_argument("--img_dir", default='/home/tiger/datasets/raw/FFHQ/images_512/')
112
+ parser.add_argument("--ds_name", default='FFHQ')
113
+ parser.add_argument("--num_workers", default=64, type=int)
114
+ parser.add_argument("--process_id", default=0, type=int)
115
+ parser.add_argument("--total_process", default=1, type=int)
116
+ parser.add_argument("--reset", action='store_true')
117
+ parser.add_argument("--img_names_file", default="img_names.pkl", type=str)
118
+ parser.add_argument("--load_img_names", action="store_true")
119
+
120
+ args = parser.parse_args()
121
+ print(f"args {args}")
122
+ img_dir = args.img_dir
123
+ img_names_file = os.path.join(img_dir, args.img_names_file)
124
+ if args.load_img_names:
125
+ img_names = load_file(img_names_file)
126
+ print(f"load image names from {img_names_file}")
127
+ else:
128
+ if args.ds_name == 'FFHQ_MV':
129
+ img_name_pattern1 = os.path.join(img_dir, "ref_imgs/*.png")
130
+ img_names1 = glob.glob(img_name_pattern1)
131
+ img_name_pattern2 = os.path.join(img_dir, "mv_imgs/*.png")
132
+ img_names2 = glob.glob(img_name_pattern2)
133
+ img_names = img_names1 + img_names2
134
+ img_names = sorted(img_names)
135
+ elif args.ds_name == 'FFHQ':
136
+ img_name_pattern = os.path.join(img_dir, "*.png")
137
+ img_names = glob.glob(img_name_pattern)
138
+ img_names = sorted(img_names)
139
+ elif args.ds_name == "PanoHeadGen":
140
+ # img_name_patterns = ["ref/*/*.png", "multi_view/*/*.png", "reverse/*/*.png"]
141
+ img_name_patterns = ["ref/*/*.png"]
142
+ img_names = []
143
+ for img_name_pattern in img_name_patterns:
144
+ img_name_pattern_full = os.path.join(img_dir, img_name_pattern)
145
+ img_names_part = glob.glob(img_name_pattern_full)
146
+ img_names.extend(img_names_part)
147
+ img_names = sorted(img_names)
148
+
149
+ # save image names
150
+ if not args.load_img_names:
151
+ save_file(img_names_file, img_names)
152
+ print(f"save image names in {img_names_file}")
153
+
154
+ print(f"total images number: {len(img_names)}")
155
+
156
+
157
+ process_id = args.process_id
158
+ total_process = args.total_process
159
+ if total_process > 1:
160
+ assert process_id <= total_process -1
161
+ num_samples_per_process = len(img_names) // total_process
162
+ if process_id == total_process - 1: # the last process takes the remainder
163
+ img_names = img_names[process_id * num_samples_per_process : ]
164
+ else:
165
+ img_names = img_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
166
+
167
+ # if not args.reset:
168
+ # img_names = get_todo_img_names(img_names)
169
+
170
+
171
+ print(f"todo_image {img_names[:10]}")
172
+ print(f"processing images number in this process: {len(img_names)}")
173
+ # print(f"todo images number: {len(img_names)}")
174
+ # input()
175
+ # exit()
176
+
177
+ if args.num_workers == 1:
178
+ index = 0
179
+ for img_name in tqdm.tqdm(img_names, desc=f"Root process {args.process_id}: extracting MP-based landmark2d"):
180
+ try:
181
+ extract_landmark_job(img_name)
182
+ except Exception as e:
183
+ print(e)
184
+ pass
185
+ if index % max(1, int(len(img_names) * 0.003)) == 0:
186
+ print(f"processed {index} / {len(img_names)}")
187
+ sys.stdout.flush()
188
+ index += 1
189
+ else:
190
+ for i, res in multiprocess_run_tqdm(
191
+ extract_landmark_job, img_names,
192
+ num_workers=args.num_workers,
193
+ desc=f"Root {args.process_id}: extracing MP-based landmark2d"):
194
+ # if index % max(1, int(len(img_names) * 0.003)) == 0:
195
+ print(f"processed {i+1} / {len(img_names)}")
196
+ sys.stdout.flush()
197
+ print(f"Root {args.process_id}: Finished extracting.")
data_gen/utils/process_image/extract_segment_imgs.py ADDED
@@ -0,0 +1,114 @@
1
+ import os
2
+ os.environ["OMP_NUM_THREADS"] = "1"
3
+
4
+ import glob
5
+ import cv2
6
+ import tqdm
7
+ import numpy as np
8
+ import PIL
9
+ from utils.commons.tensor_utils import convert_to_np
10
+ import torch
11
+ import mediapipe as mp
12
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
13
+ from data_gen.utils.mp_feature_extractors.mp_segmenter import MediapipeSegmenter
14
+ from data_gen.utils.process_video.extract_segment_imgs import inpaint_torso_job, extract_background, save_rgb_image_to_path
15
+ seg_model = MediapipeSegmenter()
16
+
17
+
18
+ def extract_segment_job(img_name):
19
+ try:
20
+ img = cv2.imread(img_name)
21
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
22
+
23
+ segmap = seg_model._cal_seg_map(img)
24
+ bg_img = extract_background([img], [segmap])
25
+ out_img_name = img_name.replace("/images_512/", f"/bg_img/").replace(".png", ".jpg") # the inputs here are .png images, not .mp4 videos
26
+ save_rgb_image_to_path(bg_img, out_img_name)
27
+
28
+ com_img = img.copy()
29
+ bg_part = segmap[0].astype(bool)[..., None].repeat(3,axis=-1)
30
+ com_img[bg_part] = bg_img[bg_part]
31
+ out_img_name = img_name.replace("/images_512/",f"/com_imgs/")
32
+ save_rgb_image_to_path(com_img, out_img_name)
33
+
34
+ for mode in ['head', 'torso', 'person', 'torso_with_bg', 'bg']:
35
+ out_img, _ = seg_model._seg_out_img_with_segmap(img, segmap, mode=mode)
36
+ out_img_name = img_name.replace("/images_512/",f"/{mode}_imgs/")
37
+ out_img = cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR)
38
+ try: os.makedirs(os.path.dirname(out_img_name), exist_ok=True)
39
+ except: pass
40
+ cv2.imwrite(out_img_name, out_img)
41
+
42
+ inpaint_torso_img, _, inpaint_torso_with_bg_img, _ = inpaint_torso_job(img, segmap) # returns (torso_img, torso_img_mask, torso_with_bg_img, torso_with_bg_img_mask)
43
+ out_img_name = img_name.replace("/images_512/",f"/inpaint_torso_imgs/")
44
+ save_rgb_image_to_path(inpaint_torso_img, out_img_name)
45
+ inpaint_torso_with_bg_img[bg_part] = bg_img[bg_part]
46
+ out_img_name = img_name.replace("/images_512/",f"/inpaint_torso_with_com_bg_imgs/")
47
+ save_rgb_image_to_path(inpaint_torso_with_bg_img, out_img_name)
48
+ return 0
49
+ except Exception as e:
50
+ print(e)
51
+ return 1
52
+
53
+ def out_exist_job(img_name):
54
+ out_name1 = img_name.replace("/images_512/", "/head_imgs/")
55
+ out_name2 = img_name.replace("/images_512/", "/com_imgs/")
56
+ out_name3 = img_name.replace("/images_512/", "/inpaint_torso_with_com_bg_imgs/")
57
+
58
+ if os.path.exists(out_name1) and os.path.exists(out_name2) and os.path.exists(out_name3):
59
+ return None
60
+ else:
61
+ return img_name
62
+
63
+ def get_todo_img_names(img_names):
64
+ todo_img_names = []
65
+ for i, res in multiprocess_run_tqdm(out_exist_job, img_names, num_workers=64):
66
+ if res is not None:
67
+ todo_img_names.append(res)
68
+ return todo_img_names
69
+
70
+
71
+ if __name__ == '__main__':
72
+ import argparse, glob, tqdm, random
73
+ parser = argparse.ArgumentParser()
74
+ parser.add_argument("--img_dir", default='./images_512')
75
+ # parser.add_argument("--img_dir", default='/home/tiger/datasets/raw/FFHQ/images_512')
76
+ parser.add_argument("--ds_name", default='FFHQ')
77
+ parser.add_argument("--num_workers", default=1, type=int)
78
+ parser.add_argument("--seed", default=0, type=int)
79
+ parser.add_argument("--process_id", default=0, type=int)
80
+ parser.add_argument("--total_process", default=1, type=int)
81
+ parser.add_argument("--reset", action='store_true')
82
+
83
+ args = parser.parse_args()
84
+ img_dir = args.img_dir
85
+ if args.ds_name == 'FFHQ_MV':
86
+ img_name_pattern1 = os.path.join(img_dir, "ref_imgs/*.png")
87
+ img_names1 = glob.glob(img_name_pattern1)
88
+ img_name_pattern2 = os.path.join(img_dir, "mv_imgs/*.png")
89
+ img_names2 = glob.glob(img_name_pattern2)
90
+ img_names = img_names1 + img_names2
91
+ elif args.ds_name == 'FFHQ':
92
+ img_name_pattern = os.path.join(img_dir, "*.png")
93
+ img_names = glob.glob(img_name_pattern)
94
+
95
+ img_names = sorted(img_names)
96
+ random.seed(args.seed)
97
+ random.shuffle(img_names)
98
+
99
+ process_id = args.process_id
100
+ total_process = args.total_process
101
+ if total_process > 1:
102
+ assert process_id <= total_process -1
103
+ num_samples_per_process = len(img_names) // total_process
104
+ if process_id == total_process - 1: # the last process takes the remainder
105
+ img_names = img_names[process_id * num_samples_per_process : ]
106
+ else:
107
+ img_names = img_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
108
+
109
+ if not args.reset:
110
+ img_names = get_todo_img_names(img_names)
111
+ print(f"todo images number: {len(img_names)}")
112
+
113
+ for i, res in multiprocess_run_tqdm(extract_segment_job, img_names, desc=f"Root process {args.process_id}: extracting segment images", num_workers=args.num_workers):
114
+ pass
data_gen/utils/process_image/fit_3dmm_landmark.py ADDED
@@ -0,0 +1,369 @@
1
+ from numpy.core.numeric import require
2
+ from numpy.lib.function_base import quantile
3
+ import torch
4
+ import torch.nn.functional as F
5
+ import copy
6
+ import numpy as np
7
+
8
+ import os
9
+ import sys
10
+ import cv2
11
+ import argparse
12
+ import tqdm
13
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
14
+ from data_gen.utils.mp_feature_extractors.face_landmarker import MediapipeLandmarker
15
+
16
+ from deep_3drecon.deep_3drecon_models.bfm import ParametricFaceModel
17
+ import pickle
18
+
19
+ face_model = ParametricFaceModel(bfm_folder='deep_3drecon/BFM',
20
+ camera_distance=10, focal=1015, keypoint_mode='mediapipe')
21
+ face_model.to("cuda")
22
+
23
+
24
+ index_lm68_from_lm468 = [127,234,93,132,58,136,150,176,152,400,379,365,288,361,323,454,356,70,63,105,66,107,336,296,334,293,300,168,197,5,4,75,97,2,326,305,
25
+ 33,160,158,133,153,144,362,385,387,263,373,380,61,40,37,0,267,270,291,321,314,17,84,91,78,81,13,311,308,402,14,178]
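+ # descriptive note: this table maps the classical 68 facial keypoints (dlib-style ordering) to their closest mediapipe-468 landmark indices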
26
+
27
+ dir_path = os.path.dirname(os.path.realpath(__file__))
28
+
29
+ LAMBDA_REG_ID = 0.3
30
+ LAMBDA_REG_EXP = 0.05
31
+
32
+ def save_file(name, content):
33
+ with open(name, "wb") as f:
34
+ pickle.dump(content, f)
35
+
36
+ def load_file(name):
37
+ with open(name, "rb") as f:
38
+ content = pickle.load(f)
39
+ return content
40
+
41
+ def cal_lan_loss_mp(proj_lan, gt_lan):
42
+ # [B, 68, 2]
43
+ loss = (proj_lan - gt_lan).pow(2)
44
+ # loss = (proj_lan - gt_lan).abs()
45
+ unmatch_mask = [ 93, 127, 132, 234, 323, 356, 361, 454]
46
+ eye = [33,246,161,160,159,158,157,173,133,155,154,153,145,144,163,7] + [263,466,388,387,386,385,384,398,362,382,381,380,374,373,390,249]
47
+ inner_lip = [78,191,80,81,82,13,312,311,310,415,308,324,318,402,317,14,87,178,88,95]
48
+ outer_lip = [61,185,40,39,37,0,267,269,270,409,291,375,321,405,314,17,84,181,91,146]
49
+ weights = torch.ones_like(loss)
50
+ weights[:, eye] = 5
51
+ weights[:, inner_lip] = 2
52
+ weights[:, outer_lip] = 2
53
+ weights[:, unmatch_mask] = 0
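+ # descriptive note: eyes get the largest weight, lips a moderate one, and contour indices without a reliable BFM correspondence are ignored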
54
+ loss = loss * weights
55
+ return torch.mean(loss)
56
+
57
+ def cal_lan_loss(proj_lan, gt_lan):
58
+ # [B, 68, 2]
59
+ loss = (proj_lan - gt_lan)** 2
60
+ # use the ldm weights from deep3drecon, see deep_3drecon/deep_3drecon_models/losses.py
61
+ weights = torch.ones_like(loss)
63
+ weights[:, 36:48, :] = 3 # eye 12 points
64
+ weights[:, -8:, :] = 3 # inner lip 8 points
65
+ weights[:, 28:31, :] = 3 # nose 3 points
66
+ loss = loss * weights
67
+ return torch.mean(loss)
68
+
69
+ def set_requires_grad(tensor_list):
70
+ for tensor in tensor_list:
71
+ tensor.requires_grad = True
72
+
73
+ def read_video_to_frames(img_name):
74
+ frames = []
75
+ cap = cv2.VideoCapture(img_name)
76
+ while cap.isOpened():
77
+ ret, frame_bgr = cap.read()
78
+ if frame_bgr is None:
79
+ break
80
+ frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
81
+ frames.append(frame_rgb)
82
+ return np.stack(frames)
83
+
84
+ @torch.enable_grad()
85
+ def fit_3dmm_for_a_image(img_name, debug=False, keypoint_mode='mediapipe', device="cuda:0", save=True):
86
+ img = cv2.imread(img_name)
87
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
88
+ img_h, img_w = img.shape[0], img.shape[1]
89
+ assert img_h == img_w
90
+ num_frames = 1
91
+
92
+ lm_name = img_name.replace("/images_512/", "/lms_2d/").replace(".png", "_lms.npy")
93
+ if lm_name.endswith('_lms.npy') and os.path.exists(lm_name):
94
+ lms = np.load(lm_name)
95
+ else:
96
+ # print("lms_2d file not found, try to extract it from image...")
97
+ try:
98
+ landmarker = MediapipeLandmarker()
99
+ lms = landmarker.extract_lm478_from_img_name(img_name)
100
+ # lms = landmarker.extract_lm478_from_img(img)
101
+ except Exception as e:
102
+ print(e)
103
+ return
104
+ if lms is None:
105
+ print("get None lms_2d, please check whether each frame has one head, exiting...")
106
+ return
107
+ lms = lms[:468].reshape([468,2])
108
+ lms = torch.FloatTensor(lms).to(device=device)
109
+ lms[..., 1] = img_h - lms[..., 1] # flip the height axis
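+ # (mediapipe reports y growing downward, while the 3DMM projection uses y growing upward)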
110
+
111
+ if keypoint_mode == 'mediapipe':
112
+ cal_lan_loss_fn = cal_lan_loss_mp
113
+ out_name = img_name.replace("/images_512/", "/coeff_fit_mp/").replace(".png", "_coeff_fit_mp.npy")
114
+ else:
115
+ cal_lan_loss_fn = cal_lan_loss
116
+ out_name = img_name.replace("/images_512/", "/coeff_fit_lm68/").replace(".png", "_coeff_fit_lm68.npy")
117
+ try:
118
+ os.makedirs(os.path.dirname(out_name), exist_ok=True)
119
+ except:
120
+ pass
121
+
122
+ id_dim, exp_dim = 80, 64
123
+ sel_ids = np.arange(0, num_frames, 40)
124
+ sel_num = sel_ids.shape[0]
125
+ arg_focal = face_model.focal
126
+
127
+ h = w = face_model.center * 2
128
+ img_scale_factor = img_h / h
129
+ lms /= img_scale_factor
130
+ cxy = torch.tensor((w / 2.0, h / 2.0), dtype=torch.float).to(device=device)
131
+
132
+ id_para = lms.new_zeros((num_frames, id_dim), requires_grad=True) # lms.new_zeros((1, id_dim), requires_grad=True)
133
+ exp_para = lms.new_zeros((num_frames, exp_dim), requires_grad=True)
134
+ euler_angle = lms.new_zeros((num_frames, 3), requires_grad=True)
135
+ trans = lms.new_zeros((num_frames, 3), requires_grad=True)
136
+
137
+ focal_length = lms.new_zeros(1, requires_grad=True)
138
+ focal_length.data += arg_focal
139
+
140
+ set_requires_grad([id_para, exp_para, euler_angle, trans])
141
+
142
+ optimizer_idexp = torch.optim.Adam([id_para, exp_para], lr=.1)
143
+ optimizer_frame = torch.optim.Adam([euler_angle, trans], lr=.1)
144
+
145
+ # initialize the other parameters; first optimize only euler (pose) and trans
146
+ for _ in range(200):
147
+ proj_geo = face_model.compute_for_landmark_fit(
148
+ id_para, exp_para, euler_angle, trans)
149
+ loss_lan = cal_lan_loss_fn(proj_geo[:, :, :2], lms.detach())
150
+ loss = loss_lan
151
+ optimizer_frame.zero_grad()
152
+ loss.backward()
153
+ optimizer_frame.step()
154
+ # print(f"loss_lan: {loss_lan.item():.2f}, euler_abs_mean: {euler_angle.abs().mean().item():.4f}, euler_std: {euler_angle.std().item():.4f}, euler_min: {euler_angle.min().item():.4f}, euler_max: {euler_angle.max().item():.4f}")
155
+ # print(f"trans_z_mean: {trans[...,2].mean().item():.4f}, trans_z_std: {trans[...,2].std().item():.4f}, trans_min: {trans[...,2].min().item():.4f}, trans_max: {trans[...,2].max().item():.4f}")
156
+
157
+ for param_group in optimizer_frame.param_groups:
158
+ param_group['lr'] = 0.1
159
+
160
+ # "jointly roughly training id exp euler trans"
161
+ for _ in range(200):
162
+ proj_geo = face_model.compute_for_landmark_fit(
163
+ id_para, exp_para, euler_angle, trans)
164
+ loss_lan = cal_lan_loss_fn(
165
+ proj_geo[:, :, :2], lms.detach())
166
+ loss_regid = torch.mean(id_para * id_para) # regularization
167
+ loss_regexp = torch.mean(exp_para * exp_para)
168
+
169
+ loss = loss_lan + loss_regid * LAMBDA_REG_ID + loss_regexp * LAMBDA_REG_EXP
170
+ optimizer_idexp.zero_grad()
171
+ optimizer_frame.zero_grad()
172
+ loss.backward()
173
+ optimizer_idexp.step()
174
+ optimizer_frame.step()
175
+ # print(f"loss_lan: {loss_lan.item():.2f}, loss_reg_id: {loss_regid.item():.2f},loss_reg_exp: {loss_regexp.item():.2f},")
176
+ # print(f"euler_abs_mean: {euler_angle.abs().mean().item():.4f}, euler_std: {euler_angle.std().item():.4f}, euler_min: {euler_angle.min().item():.4f}, euler_max: {euler_angle.max().item():.4f}")
177
+ # print(f"trans_z_mean: {trans[...,2].mean().item():.4f}, trans_z_std: {trans[...,2].std().item():.4f}, trans_min: {trans[...,2].min().item():.4f}, trans_max: {trans[...,2].max().item():.4f}")
178
+
179
+ # start fine training, initialized from the roughly trained results
180
+ id_para_ = lms.new_zeros((num_frames, id_dim), requires_grad=True)
181
+ id_para_.data = id_para.data.clone()
182
+ id_para = id_para_
183
+ exp_para_ = lms.new_zeros((num_frames, exp_dim), requires_grad=True)
184
+ exp_para_.data = exp_para.data.clone()
185
+ exp_para = exp_para_
186
+ euler_angle_ = lms.new_zeros((num_frames, 3), requires_grad=True)
187
+ euler_angle_.data = euler_angle.data.clone()
188
+ euler_angle = euler_angle_
189
+ trans_ = lms.new_zeros((num_frames, 3), requires_grad=True)
190
+ trans_.data = trans.data.clone()
191
+ trans = trans_
192
+
193
+ batch_size = 1
194
+
195
+ # "fine fitting the 3DMM in batches"
196
+ for i in range(int((num_frames-1)/batch_size+1)):
197
+ if (i+1)*batch_size > num_frames:
198
+ start_n = num_frames-batch_size
199
+ sel_ids = np.arange(max(num_frames-batch_size,0), num_frames)
200
+ else:
201
+ start_n = i*batch_size
202
+ sel_ids = np.arange(i*batch_size, i*batch_size+batch_size)
203
+ sel_lms = lms[sel_ids]
204
+
205
+ sel_id_para = id_para.new_zeros(
206
+ (batch_size, id_dim), requires_grad=True)
207
+ sel_id_para.data = id_para[sel_ids].clone()
208
+ sel_exp_para = exp_para.new_zeros(
209
+ (batch_size, exp_dim), requires_grad=True)
210
+ sel_exp_para.data = exp_para[sel_ids].clone()
211
+ sel_euler_angle = euler_angle.new_zeros(
212
+ (batch_size, 3), requires_grad=True)
213
+ sel_euler_angle.data = euler_angle[sel_ids].clone()
214
+ sel_trans = trans.new_zeros((batch_size, 3), requires_grad=True)
215
+ sel_trans.data = trans[sel_ids].clone()
216
+
217
+ set_requires_grad([sel_id_para, sel_exp_para, sel_euler_angle, sel_trans])
218
+ optimizer_cur_batch = torch.optim.Adam(
219
+ [sel_id_para, sel_exp_para, sel_euler_angle, sel_trans], lr=0.005)
220
+
221
+ for j in range(50):
222
+ proj_geo = face_model.compute_for_landmark_fit(
223
+ sel_id_para, sel_exp_para, sel_euler_angle, sel_trans)
224
+ loss_lan = cal_lan_loss_fn(
225
+ proj_geo[:, :, :2], lms.unsqueeze(0).detach())
226
+
227
+ loss_regid = torch.mean(sel_id_para * sel_id_para) # regularization
228
+ loss_regexp = torch.mean(sel_exp_para*sel_exp_para)
229
+ loss = loss_lan + loss_regid * LAMBDA_REG_ID + loss_regexp * LAMBDA_REG_EXP
230
+ optimizer_cur_batch.zero_grad()
231
+ loss.backward()
232
+ optimizer_cur_batch.step()
233
+ print(f"batch {i} | loss_lan: {loss_lan.item():.2f}, loss_reg_id: {loss_regid.item():.2f},loss_reg_exp: {loss_regexp.item():.2f}")
234
+ id_para[sel_ids].data = sel_id_para.data.clone()
235
+ exp_para[sel_ids].data = sel_exp_para.data.clone()
236
+ euler_angle[sel_ids].data = sel_euler_angle.data.clone()
237
+ trans[sel_ids].data = sel_trans.data.clone()
238
+
239
+ coeff_dict = {'id': id_para.detach().cpu().numpy(), 'exp': exp_para.detach().cpu().numpy(),
240
+ 'euler': euler_angle.detach().cpu().numpy(), 'trans': trans.detach().cpu().numpy()}
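+ # shapes (here T == num_frames == 1): id [T, 80], exp [T, 64], euler [T, 3], trans [T, 3]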
241
+ if save:
242
+ np.save(out_name, coeff_dict, allow_pickle=True)
243
+
244
+ if debug:
245
+ import imageio
246
+ debug_name = img_name.replace("/images_512/", "/coeff_fit_mp_debug/").replace(".png", "_debug.png").replace(".jpg", "_debug.jpg")
247
+ try: os.makedirs(os.path.dirname(debug_name), exist_ok=True)
248
+ except: pass
249
+ proj_geo = face_model.compute_for_landmark_fit(id_para, exp_para, euler_angle, trans)
250
+ lm68s = proj_geo[:,:,:2].detach().cpu().numpy() # [T, 68,2]
251
+ lm68s = lm68s * img_scale_factor
252
+ lms = lms * img_scale_factor
253
+ lm68s[..., 1] = img_h - lm68s[..., 1] # flip the height axis
254
+ lms[..., 1] = img_h - lms[..., 1] # flip the height axis
255
+ lm68s = lm68s.astype(int)
256
+ lm68s = lm68s.reshape([-1,2])
257
+ lms = lms.cpu().numpy().astype(int).reshape([-1,2])
258
+ for lm in lm68s:
259
+ img = cv2.circle(img, lm, 1, (0, 0, 255), thickness=-1)
260
+ for gt_lm in lms:
261
+ img = cv2.circle(img, gt_lm, 2, (255, 0, 0), thickness=1)
262
+ imageio.imwrite(debug_name, img)
263
+ print(f"debug img saved at {debug_name}")
264
+ return coeff_dict
265
+
266
+ def out_exist_job(vid_name):
267
+ out_name = vid_name.replace("/images_512/", "/coeff_fit_mp/").replace(".png","_coeff_fit_mp.npy")
268
+ # if os.path.exists(out_name) or not os.path.exists(lms_name):
269
+ if os.path.exists(out_name):
270
+ return None
271
+ else:
272
+ return vid_name
273
+
274
+ def get_todo_img_names(img_names):
275
+ todo_img_names = []
276
+ for i, res in multiprocess_run_tqdm(out_exist_job, img_names, num_workers=16):
277
+ if res is not None:
278
+ todo_img_names.append(res)
279
+ return todo_img_names
280
+
281
+
282
+ if __name__ == '__main__':
283
+ import argparse, glob, tqdm
284
+ parser = argparse.ArgumentParser()
285
+ parser.add_argument("--img_dir", default='/home/tiger/datasets/raw/FFHQ/images_512')
286
+ parser.add_argument("--ds_name", default='FFHQ')
287
+ parser.add_argument("--seed", default=0, type=int)
288
+ parser.add_argument("--process_id", default=0, type=int)
289
+ parser.add_argument("--total_process", default=1, type=int)
290
+ parser.add_argument("--keypoint_mode", default='mediapipe', type=str)
291
+ parser.add_argument("--debug", action='store_true')
292
+ parser.add_argument("--reset", action='store_true')
293
+ parser.add_argument("--device", default="cuda:0", type=str)
294
+ parser.add_argument("--output_log", action='store_true')
295
+ parser.add_argument("--load_names", action="store_true")
296
+
297
+ args = parser.parse_args()
298
+ img_dir = args.img_dir
299
+ load_names = args.load_names
300
+
301
+ print(f"args {args}")
302
+
303
+ if args.ds_name == 'single_img':
304
+ img_names = [img_dir]
305
+ else:
306
+ img_names_path = os.path.join(img_dir, "img_dir.pkl")
307
+ if os.path.exists(img_names_path) and load_names:
308
+ print(f"loading vid names from {img_names_path}")
309
+ img_names = load_file(img_names_path)
310
+ else:
311
+ if args.ds_name == 'FFHQ_MV':
312
+ img_name_pattern1 = os.path.join(img_dir, "ref_imgs/*.png")
313
+ img_names1 = glob.glob(img_name_pattern1)
314
+ img_name_pattern2 = os.path.join(img_dir, "mv_imgs/*.png")
315
+ img_names2 = glob.glob(img_name_pattern2)
316
+ img_names = img_names1 + img_names2
317
+ img_names = sorted(img_names)
318
+ elif args.ds_name == 'FFHQ':
319
+ img_name_pattern = os.path.join(img_dir, "*.png")
320
+ img_names = glob.glob(img_name_pattern)
321
+ img_names = sorted(img_names)
322
+ elif args.ds_name == "PanoHeadGen":
323
+ img_name_patterns = ["ref/*/*.png"]
324
+ img_names = []
325
+ for img_name_pattern in img_name_patterns:
326
+ img_name_pattern_full = os.path.join(img_dir, img_name_pattern)
327
+ img_names_part = glob.glob(img_name_pattern_full)
328
+ img_names.extend(img_names_part)
329
+ img_names = sorted(img_names)
330
+ print(f"saving image names to {img_names_path}")
331
+ save_file(img_names_path, img_names)
332
+
333
+ # import random
334
+ # random.seed(args.seed)
335
+ # random.shuffle(img_names)
336
+
337
+ face_model = ParametricFaceModel(bfm_folder='deep_3drecon/BFM',
338
+ camera_distance=10, focal=1015, keypoint_mode=args.keypoint_mode)
339
+ face_model.to(torch.device(args.device))
340
+
341
+ process_id = args.process_id
342
+ total_process = args.total_process
343
+ if total_process > 1:
344
+ assert process_id <= total_process -1 and process_id >= 0
345
+ num_samples_per_process = len(img_names) // total_process
346
+ if process_id == total_process - 1: # the last process takes the remainder
347
+ img_names = img_names[process_id * num_samples_per_process : ]
348
+ else:
349
+ img_names = img_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
350
+ print(f"image names number (before fileter): {len(img_names)}")
351
+
352
+
353
+ if not args.reset:
354
+ img_names = get_todo_img_names(img_names)
355
+
356
+ print(f"image names number (after fileter): {len(img_names)}")
357
+ for i in tqdm.trange(len(img_names), desc=f"process {process_id}: fitting 3dmm ..."):
358
+ img_name = img_names[i]
359
+ try:
360
+ fit_3dmm_for_a_image(img_name, args.debug, device=args.device)
361
+ except Exception as e:
362
+ print(img_name, e)
363
+ if args.output_log and i % max(int(len(img_names) * 0.003), 1) == 0:
364
+ print(f"process {process_id}: {i + 1} / {len(img_names)} done")
365
+ sys.stdout.flush()
366
+ sys.stderr.flush()
367
+
368
+ print(f"process {process_id}: fitting 3dmm all done")
369
+
data_gen/utils/process_video/euler2quaterion.py ADDED
@@ -0,0 +1,35 @@
1
+ import numpy as np
2
+ import torch
3
+ import math
4
+ import numba
5
+ from scipy.spatial.transform import Rotation as R
6
+
7
+ def euler2quaterion(euler, use_radian=True):
8
+ """
9
+ euler: np.array, [batch, 3]
10
+ return: the quaternion, np.array, [batch, 4]
11
+ """
12
+ r = R.from_euler('xyz',euler, degrees=not use_radian)
13
+ return r.as_quat()
14
+
15
+ def quaterion2euler(quat, use_radian=True):
16
+ """
17
+ quat: np.array, [batch, 4]
18
+ return: the euler, np.array, [batch, 3]
19
+ """
20
+ r = R.from_quat(quat)
21
+ return r.as_euler('xyz', degrees=not use_radian)
22
+
23
+ def rot2quaterion(rot):
24
+ r = R.from_matrix(rot)
25
+ return r.as_quat()
26
+
27
+ def quaterion2rot(quat):
28
+ r = R.from_quat(quat)
29
+ return r.as_matrix()
30
+
31
+ if __name__ == '__main__':
32
+ euler = np.array([89.999,89.999,89.999] * 100).reshape([100,3])
33
+ q = euler2quaterion(euler, use_radian=False)
34
+ e = quaterion2euler(q, use_radian=False)
35
+ print(" ")
data_gen/utils/process_video/extract_blink.py ADDED
@@ -0,0 +1,50 @@
1
+ import numpy as np
2
+ from data_util.face3d_helper import Face3DHelper
3
+ from utils.commons.tensor_utils import convert_to_tensor
4
+
5
+ def polygon_area(x, y):
6
+ """
7
+ x: [T, K=6]
8
+ y: [T, K=6]
9
+ return: [T,]
10
+ """
11
+ x_ = x - x.mean(axis=-1, keepdims=True)
12
+ y_ = y - y.mean(axis=-1, keepdims=True)
13
+ correction = x_[:,-1] * y_[:,0] - y_[:,-1]* x_[:,0]
14
+ main_area = (x_[:,:-1] * y_[:,1:]).sum(axis=-1) - (y_[:,:-1] * x_[:,1:]).sum(axis=-1)
15
+ return 0.5 * np.abs(main_area + correction)
16
+
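+ # Quick sanity check for the shoelace formula above (illustrative sketch, not part of the original pipeline):
+ # xs = np.array([[0., 1., 1., 0.]]); ys = np.array([[0., 0., 1., 1.]])
+ # polygon_area(xs, ys) # -> array([1.]) for the unit square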
17
+ def get_eye_area_percent(id, exp, face3d_helper):
18
+ id = convert_to_tensor(id)
19
+ exp = convert_to_tensor(exp)
20
+ cano_lm3d = face3d_helper.reconstruct_cano_lm3d(id, exp)
21
+ cano_lm2d = (cano_lm3d[..., :2] + 1) / 2
22
+ lms = cano_lm2d.cpu().numpy()
23
+ eyes_left = slice(36, 42)
24
+ eyes_right = slice(42, 48)
25
+ area_left = polygon_area(lms[:, eyes_left, 0], lms[:, eyes_left, 1])
26
+ area_right = polygon_area(lms[:, eyes_right, 0], lms[:, eyes_right, 1])
27
+ # area of both eyes as a percentage of the whole image; cano_lm2d is normalized to [0, 1], so the image area is 1
28
+ area_percent = (area_left + area_right) * 100 # equivalent to the original "/ 1 * 100"; recommended blink threshold is 0.25%
29
+ return area_percent # [T,]
30
+
31
+
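+ # A minimal blink-detection sketch on top of get_eye_area_percent (illustrative, not part of the original pipeline);
+ # area_percent is the [T,] array returned above, and 0.25 is the threshold recommended in the comment:
+ # blink_frame_ids = np.nonzero(area_percent < 0.25)[0]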
32
+ if __name__ == '__main__':
33
+ import numpy as np
34
+ import imageio
35
+ import cv2
36
+ import torch
37
+ from data_gen.utils.mp_feature_extractors.face_landmarker import read_video_to_frames # note: the original import pulled names from process_video.extract_lm2d that are not defined there
38
+ from data_gen.utils.process_video.fit_3dmm_landmark import fit_3dmm_for_a_video
39
+ from data_util.face3d_helper import Face3DHelper
40
+
41
+ face3d_helper = Face3DHelper()
42
+ video_name = 'data/raw/videos/May_10s.mp4'
43
+ frames = read_video_to_frames(video_name)
44
+ coeff = fit_3dmm_for_a_video(video_name, save=False)
45
+ area_percent = get_eye_area_percent(torch.tensor(coeff['id']), torch.tensor(coeff['exp']), face3d_helper)
46
+ writer = imageio.get_writer("1.mp4", fps=25)
47
+ for idx, frame in enumerate(frames):
48
+ frame = cv2.putText(frame, f"{area_percent[idx]:.2f}", org=(128,128), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=1, color=(255,0,0), thickness=1)
49
+ writer.append_data(frame)
50
+ writer.close()
data_gen/utils/process_video/extract_lm2d.py ADDED
@@ -0,0 +1,164 @@
1
+ import os
2
+ os.environ["OMP_NUM_THREADS"] = "1"
3
+ import sys
4
+ import glob
5
+ import cv2
6
+ import pickle
7
+ import tqdm
8
+ import numpy as np
9
+ import mediapipe as mp
10
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
11
+ from utils.commons.os_utils import multiprocess_glob
12
+ from data_gen.utils.mp_feature_extractors.face_landmarker import MediapipeLandmarker
13
+ import warnings
14
+ import traceback
15
+
16
+ warnings.filterwarnings('ignore')
17
+
18
+ """
19
+ The face_alignment-based lm68 has been deprecated because:
20
+ 1. its prediction accuracy around the eye region is very low;
21
+ 2. it cannot accurately predict the occluded jawline under large head rotations, so the 3DMM GT labels themselves are wrong at large angles, which hurts performance.
22
+ We now use the mediapipe-based lm68 instead.
23
+ """
24
+ # def extract_landmarks(ori_imgs_dir):
25
+
26
+ # print(f'[INFO] ===== extract face landmarks from {ori_imgs_dir} =====')
27
+
28
+ # fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
29
+ # image_paths = glob.glob(os.path.join(ori_imgs_dir, '*.png'))
30
+ # for image_path in tqdm.tqdm(image_paths):
31
+ # out_name = image_path.replace("/images_512/", "/lms_2d/").replace(".png",".lms")
32
+ # if os.path.exists(out_name):
33
+ # continue
34
+ # input = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) # [H, W, 3]
35
+ # input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
36
+ # preds = fa.get_landmarks(input)
37
+ # if preds is None:
38
+ # print(f"Skip {image_path} for no face detected")
39
+ # continue
40
+ # if len(preds) > 0:
41
+ # lands = preds[0].reshape(-1, 2)[:,:2]
42
+ # os.makedirs(os.path.dirname(out_name), exist_ok=True)
43
+ # np.savetxt(out_name, lands, '%f')
44
+ # del fa
45
+ # print(f'[INFO] ===== extracted face landmarks =====')
46
+
47
+ def save_file(name, content):
48
+ with open(name, "wb") as f:
49
+ pickle.dump(content, f)
50
+
51
+ def load_file(name):
52
+ with open(name, "rb") as f:
53
+ content = pickle.load(f)
54
+ return content
55
+
56
+
57
+ face_landmarker = None
58
+
59
+ def extract_landmark_job(video_name, nerf=False):
60
+ try:
61
+ if nerf:
62
+ out_name = video_name.replace("/raw/", "/processed/").replace(".mp4","/lms_2d.npy")
63
+ else:
64
+ out_name = video_name.replace("/video/", "/lms_2d/").replace(".mp4","_lms.npy")
65
+ if os.path.exists(out_name):
66
+ # print("out exists, skip...")
67
+ return
68
+ try:
69
+ os.makedirs(os.path.dirname(out_name), exist_ok=True)
70
+ except:
71
+ pass
72
+ global face_landmarker
73
+ if face_landmarker is None:
74
+ face_landmarker = MediapipeLandmarker()
75
+ img_lm478, vid_lm478 = face_landmarker.extract_lm478_from_video_name(video_name)
76
+ lm478 = face_landmarker.combine_vid_img_lm478_to_lm478(img_lm478, vid_lm478)
77
+ np.save(out_name, lm478)
78
+ return True
79
+ # print("Hahaha, solve one item!!!")
80
+ except Exception as e:
81
+ traceback.print_exc()
82
+ return False
83
+
84
+ def out_exist_job(vid_name):
85
+ out_name = vid_name.replace("/video/", "/lms_2d/").replace(".mp4","_lms.npy")
86
+ if os.path.exists(out_name):
87
+ return None
88
+ else:
89
+ return vid_name
90
+
91
+ def get_todo_vid_names(vid_names):
92
+ if len(vid_names) == 1: # nerf
93
+ return vid_names
94
+ todo_vid_names = []
95
+ for i, res in multiprocess_run_tqdm(out_exist_job, vid_names, num_workers=128):
96
+ if res is not None:
97
+ todo_vid_names.append(res)
98
+ return todo_vid_names
99
+
100
+ if __name__ == '__main__':
101
+ import argparse, glob, tqdm, random
102
+ parser = argparse.ArgumentParser()
103
+ parser.add_argument("--vid_dir", default='nerf')
104
+ parser.add_argument("--ds_name", default='data/raw/videos/May.mp4')
105
+ parser.add_argument("--num_workers", default=2, type=int)
106
+ parser.add_argument("--process_id", default=0, type=int)
107
+ parser.add_argument("--total_process", default=1, type=int)
108
+ parser.add_argument("--reset", action="store_true")
109
+ parser.add_argument("--load_names", action="store_true")
110
+
111
+ args = parser.parse_args()
112
+ vid_dir = args.vid_dir
113
+ ds_name = args.ds_name
114
+ load_names = args.load_names
115
+
116
+ if ds_name.lower() == 'nerf': # process a single video
117
+ vid_names = [vid_dir]
118
+ out_names = [video_name.replace("/raw/", "/processed/").replace(".mp4","/lms_2d.npy") for video_name in vid_names]
119
+ else: # process the whole dataset
120
+ if ds_name in ['lrs3_trainval']:
121
+ vid_name_pattern = os.path.join(vid_dir, "*/*.mp4")
122
+ elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
123
+ vid_name_pattern = os.path.join(vid_dir, "*.mp4")
124
+ elif ds_name in ['lrs2', 'lrs3', 'voxceleb2', 'CMLR']:
125
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
126
+ elif ds_name in ["RAVDESS", 'VFHQ']:
127
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
128
+ else:
129
+ raise NotImplementedError()
130
+
131
+ vid_names_path = os.path.join(vid_dir, "vid_names.pkl")
132
+ if os.path.exists(vid_names_path) and load_names:
133
+ print(f"loading vid names from {vid_names_path}")
134
+ vid_names = load_file(vid_names_path)
135
+ else:
136
+ vid_names = multiprocess_glob(vid_name_pattern)
137
+ vid_names = sorted(vid_names)
138
+ if not load_names:
139
+ print(f"saving vid names to {vid_names_path}")
140
+ save_file(vid_names_path, vid_names)
141
+ out_names = [video_name.replace("/video/", "/lms_2d/").replace(".mp4","_lms.npy") for video_name in vid_names]
142
+
143
+ process_id = args.process_id
144
+ total_process = args.total_process
145
+ if total_process > 1:
146
+ assert process_id <= total_process -1
147
+ num_samples_per_process = len(vid_names) // total_process
148
+ if process_id == total_process - 1: # the last process takes the remainder
149
+ vid_names = vid_names[process_id * num_samples_per_process : ]
150
+ else:
151
+ vid_names = vid_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
152
+
153
+ if not args.reset:
154
+ vid_names = get_todo_vid_names(vid_names)
155
+ print(f"todo videos number: {len(vid_names)}")
156
+
157
+ fail_cnt = 0
158
+ job_args = [(vid_name, ds_name=='nerf') for vid_name in vid_names]
159
+ for (i, res) in multiprocess_run_tqdm(extract_landmark_job, job_args, num_workers=args.num_workers, desc=f"Root {args.process_id}: extracting MP-based landmark2d"):
160
+ if res is False:
161
+ fail_cnt += 1
162
+ print(f"finished {i + 1} / {len(vid_names)} = {(i + 1) / len(vid_names):.4f}, failed {fail_cnt} / {i + 1} = {fail_cnt / (i + 1):.4f}")
163
+ sys.stdout.flush()
164
+ pass
data_gen/utils/process_video/extract_segment_imgs.py ADDED
@@ -0,0 +1,494 @@
1
+ import os
2
+ os.environ["OMP_NUM_THREADS"] = "1"
3
+ import random
4
+ import glob
5
+ import cv2
6
+ import tqdm
7
+ import numpy as np
8
+ from typing import Union
9
+ from utils.commons.tensor_utils import convert_to_np
10
+ from utils.commons.os_utils import multiprocess_glob
11
+ import pickle
12
+ import traceback
13
+ import multiprocessing
14
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
15
+ from scipy.ndimage import binary_erosion, binary_dilation
16
+ from sklearn.neighbors import NearestNeighbors
17
+ from mediapipe.tasks.python import vision
18
+ from data_gen.utils.mp_feature_extractors.mp_segmenter import MediapipeSegmenter, encode_segmap_mask_to_image, decode_segmap_mask_from_image, job_cal_seg_map_for_image
19
+
20
+ seg_model = None
21
+ segmenter = None
22
+ mat_model = None
23
+ lama_model = None
24
+ lama_config = None
25
+
26
+ from data_gen.utils.process_video.split_video_to_imgs import extract_img_job
27
+
28
+ BG_NAME_MAP = {
29
+ "knn": "",
30
+ }
31
+ FRAME_SELECT_INTERVAL = 5
32
+ SIM_METHOD = "mse"
33
+ SIM_THRESHOLD = 3
34
+
35
+ def save_file(name, content):
36
+ with open(name, "wb") as f:
37
+ pickle.dump(content, f)
38
+
39
+ def load_file(name):
40
+ with open(name, "rb") as f:
41
+ content = pickle.load(f)
42
+ return content
43
+
44
+ def save_rgb_alpha_image_to_path(img, alpha, img_path):
45
+ try: os.makedirs(os.path.dirname(img_path), exist_ok=True)
46
+ except: pass
47
+ cv2.imwrite(img_path, np.concatenate([cv2.cvtColor(img, cv2.COLOR_RGB2BGR), alpha], axis=-1))
48
+
49
+ def save_rgb_image_to_path(img, img_path):
50
+ try: os.makedirs(os.path.dirname(img_path), exist_ok=True)
51
+ except: pass
52
+ cv2.imwrite(img_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
53
+
54
+ def load_rgb_image_to_path(img_path):
55
+ return cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
56
+
57
+ def image_similarity(x: np.ndarray, y: np.ndarray, method="mse"):
58
+ if method == "mse":
59
+ return np.mean((x - y) ** 2)
60
+ else:
61
+ raise NotImplementedError
62
+
63
+ def extract_background(img_lst, segmap_mask_lst=None, method="knn", device='cpu', mix_bg=True):
64
+ """
65
+ img_lst: list of rgb ndarray
66
+ method: "knn"
67
+ """
68
+ global segmenter
69
+ global seg_model
70
+ global mat_model
71
+ global lama_model
72
+ global lama_config
73
+
74
+ assert len(img_lst) > 0
75
+ if segmap_mask_lst is not None:
76
+ assert len(segmap_mask_lst) == len(img_lst)
77
+ else:
78
+ del segmenter
79
+ del seg_model
80
+ seg_model = MediapipeSegmenter()
81
+ segmenter = vision.ImageSegmenter.create_from_options(seg_model.video_options)
82
+
83
+ def get_segmap_mask(img_lst, segmap_mask_lst, index):
84
+ if segmap_mask_lst is not None:
85
+ segmap = refresh_segment_mask(segmap_mask_lst[index])
86
+ else:
87
+ segmap = seg_model._cal_seg_map(refresh_image(img_lst[index]), segmenter=segmenter)
88
+ return segmap
89
+
90
+ if method == "knn":
91
+ num_frames = len(img_lst)
92
+ if num_frames < 100:
93
+ FRAME_SELECT_INTERVAL = 5
94
+ elif num_frames < 10000:
95
+ FRAME_SELECT_INTERVAL = 20
96
+ else:
97
+ FRAME_SELECT_INTERVAL = num_frames // 500
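+ # subsample the frames so the per-frame KNN background solve below stays affordable on long videos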
98
+
99
+ img_lst = img_lst[::FRAME_SELECT_INTERVAL] if num_frames > FRAME_SELECT_INTERVAL else img_lst[0:1]
100
+
101
+ if segmap_mask_lst is not None:
102
+ segmap_mask_lst = segmap_mask_lst[::FRAME_SELECT_INTERVAL] if num_frames > FRAME_SELECT_INTERVAL else segmap_mask_lst[0:1]
103
+ assert len(img_lst) == len(segmap_mask_lst)
104
+ # get H/W
105
+ h, w = refresh_image(img_lst[0]).shape[:2]
106
+
107
+ # nearest neighbors
108
+ all_xys = np.mgrid[0:h, 0:w].reshape(2, -1).transpose() # [512*512, 2] coordinate grid
109
+ distss = []
110
+ for idx, img in tqdm.tqdm(enumerate(img_lst), desc='combining backgrounds...', total=len(img_lst)):
111
+ segmap = get_segmap_mask(img_lst=img_lst, segmap_mask_lst=segmap_mask_lst, index=idx)
112
+ bg = (segmap[0]).astype(bool) # [h,w] bool mask
113
+ fg_xys = np.stack(np.nonzero(~bg)).transpose(1, 0) # [N_nonbg,2] coordinate of non-bg pixels
114
+ nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(fg_xys)
115
+ dists, _ = nbrs.kneighbors(all_xys) # [512*512, 1] distance to nearest non-bg pixel
116
+ distss.append(dists)
117
+
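+ # Descriptive note on the KNN background model: for every sampled frame we computed, per pixel, the distance
+ # to the nearest person (non-bg) pixel. Taking the max over frames finds pixels that were far from the person
+ # in at least one frame; those pixels are trusted as pure background and their color is sampled from that frame.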
118
+ distss = np.stack(distss) # [B, 512*512, 1]
119
+ max_dist = np.max(distss, 0) # [512*512, 1]
120
+ max_id = np.argmax(distss, 0) # id of frame
121
+
122
+ bc_pixs = max_dist > 10 # pixels that are background in at least one frame; "background" means the nearest non-bg pixel is more than 10 px away
123
+ bc_pixs_id = np.nonzero(bc_pixs)
124
+ bc_ids = max_id[bc_pixs]
125
+
126
+ # TODO: maybe we should reimplement here to avoid memory costs?
127
+ # though there is upper limits of images here
128
+ num_pixs = distss.shape[1]
129
+ bg_img = np.zeros((h*w, 3), dtype=np.uint8)
130
+ img_lst = [refresh_image(img) for img in img_lst]
131
+ imgs = np.stack(img_lst).reshape(-1, num_pixs, 3)
132
+ bg_img[bc_pixs_id, :] = imgs[bc_ids, bc_pixs_id, :] # for confidently-background pixels, sample the color directly from the frame that maximized the distance
133
+ bg_img = bg_img.reshape(h, w, 3)
134
+
135
+ max_dist = max_dist.reshape(h, w)
136
+ bc_pixs = max_dist > 10 # 5
137
+ bg_xys = np.stack(np.nonzero(~bc_pixs)).transpose()
138
+ fg_xys = np.stack(np.nonzero(bc_pixs)).transpose()
139
+ nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(fg_xys)
140
+ distances, indices = nbrs.kneighbors(bg_xys) # for the remaining pixels, use KNN to borrow the color of the nearest confident background pixel
141
+ bg_fg_xys = fg_xys[indices[:, 0]]
142
+ bg_img[bg_xys[:, 0], bg_xys[:, 1], :] = bg_img[bg_fg_xys[:, 0], bg_fg_xys[:, 1], :]
143
+ else:
144
+ raise NotImplementedError # deprecated
145
+
146
+ return bg_img
147
+
148
+ def inpaint_torso_job(gt_img, segmap):
149
+ bg_part = (segmap[0]).astype(bool)
150
+ head_part = (segmap[1] + segmap[3] + segmap[5]).astype(bool)
151
+ neck_part = (segmap[2]).astype(bool)
152
+ torso_part = (segmap[4]).astype(bool)
153
+ img = gt_img.copy()
154
+ img[head_part] = 0
155
+
156
+ # torso part "vertical" in-painting...
157
+ L = 8 + 1
158
+ torso_coords = np.stack(np.nonzero(torso_part), axis=-1) # [M, 2]
159
+ # lexsort: sort 2D coords first by y then by x,
160
+ # ref: https://stackoverflow.com/questions/2706605/sorting-a-2d-numpy-array-by-multiple-axes
161
+ inds = np.lexsort((torso_coords[:, 0], torso_coords[:, 1]))
162
+ torso_coords = torso_coords[inds]
163
+ # choose the top pixel for each column
164
+ u, uid, ucnt = np.unique(torso_coords[:, 1], return_index=True, return_counts=True)
165
+ top_torso_coords = torso_coords[uid] # [m, 2]
166
+ # only keep top-is-head pixels
167
+ top_torso_coords_up = top_torso_coords.copy() - np.array([1, 0]) # [N, 2]
168
+ mask = head_part[tuple(top_torso_coords_up.T)]
169
+ if mask.any():
170
+ top_torso_coords = top_torso_coords[mask]
171
+ # get the color
172
+ top_torso_colors = gt_img[tuple(top_torso_coords.T)] # [m, 3]
173
+ # construct inpaint coords (vertically up, or minus in x)
174
+ inpaint_torso_coords = top_torso_coords[None].repeat(L, 0) # [L, m, 2]
175
+ inpaint_offsets = np.stack([-np.arange(L), np.zeros(L, dtype=np.int32)], axis=-1)[:, None] # [L, 1, 2]
176
+ inpaint_torso_coords += inpaint_offsets
177
+ inpaint_torso_coords = inpaint_torso_coords.reshape(-1, 2) # [Lm, 2]
178
+ inpaint_torso_colors = top_torso_colors[None].repeat(L, 0) # [L, m, 3]
179
+ darken_scaler = 0.98 ** np.arange(L).reshape(L, 1, 1) # [L, 1, 1]
180
+ inpaint_torso_colors = (inpaint_torso_colors * darken_scaler).reshape(-1, 3) # [Lm, 3]
181
+ # set color
182
+ img[tuple(inpaint_torso_coords.T)] = inpaint_torso_colors
183
+ inpaint_torso_mask = np.zeros_like(img[..., 0]).astype(bool)
184
+ inpaint_torso_mask[tuple(inpaint_torso_coords.T)] = True
185
+ else:
186
+ inpaint_torso_mask = None
187
+
188
+ # neck part "vertical" in-painting...
189
+ push_down = 4
190
+ L = 48 + push_down + 1
191
+ neck_part = binary_dilation(neck_part, structure=np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=bool), iterations=3)
192
+ neck_coords = np.stack(np.nonzero(neck_part), axis=-1) # [M, 2]
193
+ # lexsort: sort 2D coords first by y then by x,
194
+ # ref: https://stackoverflow.com/questions/2706605/sorting-a-2d-numpy-array-by-multiple-axes
195
+ inds = np.lexsort((neck_coords[:, 0], neck_coords[:, 1]))
196
+ neck_coords = neck_coords[inds]
197
+ # choose the top pixel for each column
198
+ u, uid, ucnt = np.unique(neck_coords[:, 1], return_index=True, return_counts=True)
199
+ top_neck_coords = neck_coords[uid] # [m, 2]
200
+ # only keep top-is-head pixels
201
+ top_neck_coords_up = top_neck_coords.copy() - np.array([1, 0])
202
+ mask = head_part[tuple(top_neck_coords_up.T)]
203
+ top_neck_coords = top_neck_coords[mask]
204
+ # push these top down for 4 pixels to make the neck inpainting more natural...
205
+ offset_down = np.minimum(ucnt[mask] - 1, push_down)
206
+ top_neck_coords += np.stack([offset_down, np.zeros_like(offset_down)], axis=-1)
207
+ # get the color
208
+ top_neck_colors = gt_img[tuple(top_neck_coords.T)] # [m, 3]
209
+ # construct inpaint coords (vertically up, or minus in x)
210
+ inpaint_neck_coords = top_neck_coords[None].repeat(L, 0) # [L, m, 2]
211
+ inpaint_offsets = np.stack([-np.arange(L), np.zeros(L, dtype=np.int32)], axis=-1)[:, None] # [L, 1, 2]
212
+ inpaint_neck_coords += inpaint_offsets
213
+ inpaint_neck_coords = inpaint_neck_coords.reshape(-1, 2) # [Lm, 2]
214
+ inpaint_neck_colors = top_neck_colors[None].repeat(L, 0) # [L, m, 3]
215
+ darken_scaler = 0.98 ** np.arange(L).reshape(L, 1, 1) # [L, 1, 1]
216
+ inpaint_neck_colors = (inpaint_neck_colors * darken_scaler).reshape(-1, 3) # [Lm, 3]
217
+ # set color
218
+ img[tuple(inpaint_neck_coords.T)] = inpaint_neck_colors
219
+ # apply blurring to the inpaint area to avoid vertical-line artifacts...
220
+ inpaint_mask = np.zeros_like(img[..., 0]).astype(bool)
221
+ inpaint_mask[tuple(inpaint_neck_coords.T)] = True
222
+
223
+ blur_img = img.copy()
224
+ blur_img = cv2.GaussianBlur(blur_img, (5, 5), cv2.BORDER_DEFAULT)
225
+ img[inpaint_mask] = blur_img[inpaint_mask]
226
+
227
+ # set mask
228
+ torso_img_mask = (neck_part | torso_part | inpaint_mask)
229
+ torso_with_bg_img_mask = (bg_part | neck_part | torso_part | inpaint_mask)
230
+ if inpaint_torso_mask is not None:
231
+ torso_img_mask = torso_img_mask | inpaint_torso_mask
232
+ torso_with_bg_img_mask = torso_with_bg_img_mask | inpaint_torso_mask
233
+
234
+ torso_img = img.copy()
235
+ torso_img[~torso_img_mask] = 0
236
+ torso_with_bg_img = img.copy()
237
+ torso_with_bg_img[~torso_with_bg_img_mask] = 0
238
+
239
+ return torso_img, torso_img_mask, torso_with_bg_img, torso_with_bg_img_mask
240
+
241
+ def load_segment_mask_from_file(filename: str):
242
+ encoded_segmap = load_rgb_image_to_path(filename)
243
+ segmap_mask = decode_segmap_mask_from_image(encoded_segmap)
244
+ return segmap_mask
245
+
246
+ # load segment mask to memory if not loaded yet
247
+ def refresh_segment_mask(segmap_mask: Union[str, np.ndarray]):
248
+ if isinstance(segmap_mask, str):
249
+ segmap_mask = load_segment_mask_from_file(segmap_mask)
250
+ return segmap_mask
251
+
252
+ # load segment mask to memory if not loaded yet
253
+ def refresh_image(image: Union[str, np.ndarray]):
254
+ if isinstance(image, str):
255
+ image = load_rgb_image_to_path(image)
256
+ return image
257
+
258
+ def generate_segment_imgs_job(img_name, segmap, img):
259
+ out_img_name = segmap_name = img_name.replace("/gt_imgs/", "/segmaps/").replace(".jpg", ".png") # saving as jpg would distort the pixel values, so use png
260
+ try: os.makedirs(os.path.dirname(out_img_name), exist_ok=True)
261
+ except: pass
262
+ encoded_segmap = encode_segmap_mask_to_image(segmap)
263
+ save_rgb_image_to_path(encoded_segmap, out_img_name)
264
+
265
+ for mode in ['head', 'torso', 'person', 'bg']:
266
+ out_img, mask = seg_model._seg_out_img_with_segmap(img, segmap, mode=mode)
267
+ img_alpha = 255 * np.ones((img.shape[0], img.shape[1], 1), dtype=np.uint8) # alpha
268
+ mask = mask[0][..., None]
269
+ img_alpha[~mask] = 0
270
+ out_img_name = img_name.replace("/gt_imgs/", f"/{mode}_imgs/").replace(".jpg", ".png")
271
+ save_rgb_alpha_image_to_path(out_img, img_alpha, out_img_name)
272
+
273
+ inpaint_torso_img, inpaint_torso_img_mask, inpaint_torso_with_bg_img, inpaint_torso_with_bg_img_mask = inpaint_torso_job(img, segmap)
274
+ img_alpha = 255 * np.ones((img.shape[0], img.shape[1], 1), dtype=np.uint8) # alpha
275
+ img_alpha[~inpaint_torso_img_mask[..., None]] = 0
276
+ out_img_name = img_name.replace("/gt_imgs/", f"/inpaint_torso_imgs/").replace(".jpg", ".png")
277
+ save_rgb_alpha_image_to_path(inpaint_torso_img, img_alpha, out_img_name)
278
+ return segmap_name
279
+
280
+ def segment_and_generate_for_image_job(img_name, img, segmenter_options=None, segmenter=None, store_in_memory=False):
281
+ img = refresh_image(img)
282
+ segmap_mask, segmap_image = job_cal_seg_map_for_image(img, segmenter_options=segmenter_options, segmenter=segmenter)
283
+ segmap_name = generate_segment_imgs_job(img_name=img_name, segmap=segmap_mask, img=img)
284
+ if store_in_memory:
285
+ return segmap_mask
286
+ else:
287
+ return segmap_name
288
+
289
+ def extract_segment_job(
290
+ video_name,
291
+ nerf=False,
292
+ background_method='knn',
293
+ device="cpu",
294
+ total_gpus=0,
295
+ mix_bg=True,
296
+ store_in_memory=False, # set to True to speed up a bit of preprocess, but leads to HUGE memory costs (100GB for 5-min video)
297
+ force_single_process=False, # turn this on if you find multi-process does not work on your environment
298
+ ):
299
+ global segmenter
300
+ global seg_model
301
+ del segmenter
302
+ del seg_model
303
+ seg_model = MediapipeSegmenter()
304
+ segmenter = vision.ImageSegmenter.create_from_options(seg_model.options)
305
+ # nerf means that we extract only one video, so can enable multi-process acceleration
306
+ multiprocess_enable = nerf and not force_single_process
307
+ try:
308
+ if "cuda" in device:
309
+ # determine which cuda index from subprocess id
310
+ pname = multiprocessing.current_process().name
311
+ pid = int(pname.rsplit("-", 1)[-1]) - 1
312
+ cuda_id = pid % total_gpus
313
+ device = f"cuda:{cuda_id}"
314
+
315
+ if nerf: # single video
316
+ raw_img_dir = video_name.replace(".mp4", "/gt_imgs/").replace("/raw/","/processed/")
317
+ else: # whole dataset
318
+ raw_img_dir = video_name.replace(".mp4", "").replace("/video/", "/gt_imgs/")
319
+ if not os.path.exists(raw_img_dir):
320
+ extract_img_job(video_name, raw_img_dir) # use ffmpeg to split video into imgs
321
+
322
+ img_names = glob.glob(os.path.join(raw_img_dir, "*.jpg"))
323
+
324
+ img_lst = []
325
+
326
+ for img_name in img_names:
327
+ if store_in_memory:
328
+ img = load_rgb_image_to_path(img_name)
329
+ else:
330
+ img = img_name
331
+ img_lst.append(img)
332
+
333
+ print("| Extracting Segmaps && Saving...")
334
+ args = []
335
+ segmap_mask_lst = []
336
+ # preparing parameters for segment
337
+ for i in range(len(img_lst)):
338
+ img_name = img_names[i]
339
+ img = img_lst[i]
340
+ if multiprocess_enable: # create seg_model in subprocesses here
341
+ options = seg_model.options
342
+ segmenter_arg = None
343
+ else: # use seg_model of this process
344
+ options = None
345
+ segmenter_arg = segmenter
346
+ arg = (img_name, img, options, segmenter_arg, store_in_memory)
347
+ args.append(arg)
348
+
349
+ if multiprocess_enable:
350
+ for (_, res) in multiprocess_run_tqdm(segment_and_generate_for_image_job, args=args, num_workers=16, desc='generating segment images in multi-processes...'):
351
+ segmap_mask = res
352
+ segmap_mask_lst.append(segmap_mask)
353
+ else:
354
+ for index in tqdm.tqdm(range(len(img_lst)), desc="generating segment images in single-process..."):
355
+ segmap_mask = segment_and_generate_for_image_job(*args[index])
356
+ segmap_mask_lst.append(segmap_mask)
357
+ print("| Extracted Segmaps Done.")
358
+
359
+ print("| Extracting background...")
360
+ bg_prefix_name = f"bg{BG_NAME_MAP[background_method]}"
361
+ bg_img = extract_background(img_lst, segmap_mask_lst, method=background_method, device=device, mix_bg=mix_bg)
362
+ if nerf:
363
+ out_img_name = video_name.replace("/raw/", "/processed/").replace(".mp4", f"/{bg_prefix_name}.jpg")
364
+ else:
365
+ out_img_name = video_name.replace("/video/", f"/{bg_prefix_name}_img/").replace(".mp4", ".jpg")
366
+ save_rgb_image_to_path(bg_img, out_img_name)
367
+ print("| Extracted background done.")
368
+
369
+ print("| Extracting com_imgs...")
370
+ com_prefix_name = f"com{BG_NAME_MAP[background_method]}"
371
+ for i in tqdm.trange(len(img_names), desc='extracting com_imgs'):
372
+ img_name = img_names[i]
373
+ com_img = refresh_image(img_lst[i]).copy()
374
+ segmap = refresh_segment_mask(segmap_mask_lst[i])
375
+ bg_part = segmap[0].astype(bool)[..., None].repeat(3,axis=-1)
376
+ com_img[bg_part] = bg_img[bg_part]
377
+ out_img_name = img_name.replace("/gt_imgs/", f"/{com_prefix_name}_imgs/")
378
+ save_rgb_image_to_path(com_img, out_img_name)
379
+ print("| Extracted com_imgs done.")
380
+
381
+ return 0
382
+ except Exception as e:
383
+ print(str(type(e)), e)
384
+ traceback.print_exc()
385
+ return 1
386
+
387
+ def out_exist_job(vid_name, background_method='knn'):
388
+ com_prefix_name = f"com{BG_NAME_MAP[background_method]}"
389
+ img_dir = vid_name.replace("/video/", "/gt_imgs/").replace(".mp4", "")
390
+ out_dir1 = img_dir.replace("/gt_imgs/", "/head_imgs/")
391
+ out_dir2 = img_dir.replace("/gt_imgs/", f"/{com_prefix_name}_imgs/")
392
+
393
+ if os.path.exists(img_dir) and os.path.exists(out_dir1) and os.path.exists(out_dir2):
394
+ num_frames = len(os.listdir(img_dir))
395
+ if len(os.listdir(out_dir1)) == num_frames and len(os.listdir(out_dir2)) == num_frames:
396
+ return None
397
+ else:
398
+ return vid_name
399
+ else:
400
+ return vid_name
401
+
402
+ def get_todo_vid_names(vid_names, background_method='knn'):
403
+ if len(vid_names) == 1: # nerf
404
+ return vid_names
405
+ todo_vid_names = []
406
+ fn_args = [(vid_name, background_method) for vid_name in vid_names]
407
+ for i, res in multiprocess_run_tqdm(out_exist_job, fn_args, num_workers=16, desc="checking todo videos..."):
408
+ if res is not None:
409
+ todo_vid_names.append(res)
410
+ return todo_vid_names
411
+
412
+ if __name__ == '__main__':
413
+ import argparse, glob, tqdm, random
414
+ parser = argparse.ArgumentParser()
415
+ parser.add_argument("--vid_dir", default='/home/tiger/datasets/raw/TH1KH_512/video')
416
+ parser.add_argument("--ds_name", default='TH1KH_512')
417
+ parser.add_argument("--num_workers", default=48, type=int)
418
+ parser.add_argument("--seed", default=0, type=int)
419
+ parser.add_argument("--process_id", default=0, type=int)
420
+ parser.add_argument("--total_process", default=1, type=int)
421
+ parser.add_argument("--reset", action='store_true')
422
+ parser.add_argument("--load_names", action="store_true")
423
+ parser.add_argument("--background_method", choices=['knn', 'mat', 'ddnm', 'lama'], type=str, default='knn')
424
+ parser.add_argument("--total_gpus", default=0, type=int) # zero gpus means utilizing cpu
425
+ parser.add_argument("--no_mix_bg", action="store_true")
426
+ parser.add_argument("--store_in_memory", action="store_true") # set to True to speed up preprocess, but leads to high memory costs
427
+ parser.add_argument("--force_single_process", action="store_true") # turn this on if you find multi-process does not work on your environment
428
+
429
+ args = parser.parse_args()
430
+ vid_dir = args.vid_dir
431
+ ds_name = args.ds_name
432
+ load_names = args.load_names
433
+ background_method = args.background_method
434
+ total_gpus = args.total_gpus
435
+ mix_bg = not args.no_mix_bg
436
+ store_in_memory = args.store_in_memory
437
+ force_single_process = args.force_single_process
438
+
439
+ devices = os.environ.get('CUDA_VISIBLE_DEVICES', '').split(",")
440
+ for d in devices[:total_gpus]:
441
+ os.system(f'pkill -f "voidgpu{d}"')
442
+
443
+ if ds_name.lower() == 'nerf': # process a single video
444
+ vid_names = [vid_dir]
445
+ out_names = [video_name.replace("/raw/", "/processed/").replace(".mp4","_lms.npy") for video_name in vid_names]
446
+ else: # process the whole dataset
447
+ if ds_name in ['lrs3_trainval']:
448
+ vid_name_pattern = os.path.join(vid_dir, "*/*.mp4")
449
+ elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
450
+ vid_name_pattern = os.path.join(vid_dir, "*.mp4")
451
+ elif ds_name in ['lrs2', 'lrs3', 'voxceleb2']:
452
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
453
+ elif ds_name in ["RAVDESS", 'VFHQ']:
454
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
455
+ else:
456
+ raise NotImplementedError()
457
+
458
+ vid_names_path = os.path.join(vid_dir, "vid_names.pkl")
459
+ if os.path.exists(vid_names_path) and load_names:
460
+ print(f"loading vid names from {vid_names_path}")
461
+ vid_names = load_file(vid_names_path)
462
+ else:
463
+ vid_names = multiprocess_glob(vid_name_pattern)
464
+ vid_names = sorted(vid_names)
465
+ print(f"saving vid names to {vid_names_path}")
466
+ save_file(vid_names_path, vid_names)
467
+
468
+ vid_names = sorted(vid_names)
469
+ random.seed(args.seed)
470
+ random.shuffle(vid_names)
471
+
472
+ process_id = args.process_id
473
+ total_process = args.total_process
474
+ if total_process > 1:
475
+ assert process_id <= total_process -1
476
+ num_samples_per_process = len(vid_names) // total_process
477
+ if process_id == total_process - 1: # the last process takes the remainder
478
+ vid_names = vid_names[process_id * num_samples_per_process : ]
479
+ else:
480
+ vid_names = vid_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
481
+
482
+ if not args.reset:
483
+ vid_names = get_todo_vid_names(vid_names, background_method)
484
+ print(f"todo videos number: {len(vid_names)}")
485
+
486
+ device = "cuda" if total_gpus > 0 else "cpu"
487
+ extract_job = extract_segment_job
488
+ fn_args = [(vid_name, ds_name=='nerf', background_method, device, total_gpus, mix_bg, store_in_memory, force_single_process) for i, vid_name in enumerate(vid_names)]
489
+
490
+ if ds_name == 'nerf': # process a single video
491
+ extract_job(*fn_args[0])
492
+ else:
493
+ for i, res in multiprocess_run_tqdm(extract_job, fn_args, desc=f"Root process {args.process_id}: segment images", num_workers=args.num_workers):
494
+ pass
data_gen/utils/process_video/fit_3dmm_landmark.py ADDED
@@ -0,0 +1,565 @@
1
+ # This is a script for efficient 3DMM coefficient extraction.
2
+ # It can reconstruct an accurate 3D face in real time.
3
+ # It is built upon the BFM 2009 model and the MediaPipe landmark extractor.
4
+ # It is authored by ZhenhuiYe ([email protected]); feel free to contact him with any suggestions for improvement!
5
+
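+ # Minimal usage sketch (flags mirror the argparse options in __main__; the path is illustrative):
+ #   python data_gen/utils/process_video/fit_3dmm_landmark.py \
+ #       --ds_name=nerf --vid_dir=data/raw/videos/May_10s.mp4 --keypoint_mode=mediapipe --debug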
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import copy
11
+ import numpy as np
12
+
13
+ import random
14
+ import pickle
15
+ import os
16
+ import sys
17
+ import cv2
18
+ import argparse
19
+ import tqdm
20
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
21
+ from data_gen.utils.mp_feature_extractors.face_landmarker import MediapipeLandmarker, read_video_to_frames
22
+ from deep_3drecon.deep_3drecon_models.bfm import ParametricFaceModel
23
+ from deep_3drecon.secc_renderer import SECC_Renderer
24
+ from utils.commons.os_utils import multiprocess_glob
25
+
26
+
27
+ face_model = ParametricFaceModel(bfm_folder='deep_3drecon/BFM',
28
+ camera_distance=10, focal=1015, keypoint_mode='mediapipe')
29
+ face_model.to(torch.device("cuda:0"))
30
+
31
+ dir_path = os.path.dirname(os.path.realpath(__file__))
32
+
33
+
34
+ def draw_axes(img, pitch, yaw, roll, tx, ty, size=50):
35
+ # yaw = -yaw
36
+ pitch = - pitch
37
+ roll = - roll
38
+ rotation_matrix = cv2.Rodrigues(np.array([pitch, yaw, roll]))[0].astype(np.float64)
39
+ axes_points = np.array([
40
+ [1, 0, 0, 0],
41
+ [0, 1, 0, 0],
42
+ [0, 0, 1, 0]
43
+ ], dtype=np.float64)
44
+ axes_points = rotation_matrix @ axes_points
45
+ axes_points = (axes_points[:2, :] * size).astype(int)
46
+ axes_points[0, :] = axes_points[0, :] + tx
47
+ axes_points[1, :] = axes_points[1, :] + ty
48
+
49
+ new_img = img.copy()
50
+ cv2.line(new_img, tuple(axes_points[:, 3].ravel()), tuple(axes_points[:, 0].ravel()), (255, 0, 0), 3)
51
+ cv2.line(new_img, tuple(axes_points[:, 3].ravel()), tuple(axes_points[:, 1].ravel()), (0, 255, 0), 3)
52
+ cv2.line(new_img, tuple(axes_points[:, 3].ravel()), tuple(axes_points[:, 2].ravel()), (0, 0, 255), 3)
53
+ return new_img
54
+
55
+ def save_file(name, content):
56
+ with open(name, "wb") as f:
57
+ pickle.dump(content, f)
58
+
59
+ def load_file(name):
60
+ with open(name, "rb") as f:
61
+ content = pickle.load(f)
62
+ return content
63
+
64
+ def cal_lap_loss(in_tensor):
65
+ # [T, 68, 2]
66
+ t = in_tensor.shape[0]
67
+ in_tensor = in_tensor.reshape([t, -1]).permute(1,0).unsqueeze(1) # [c, 1, t]
68
+ in_tensor = torch.cat([in_tensor[:, :, 0:1], in_tensor, in_tensor[:, :, -1:]], dim=-1)
69
+ lap_kernel = torch.Tensor((-0.5, 1.0, -0.5)).reshape([1,1,3]).float().to(in_tensor.device) # [1, 1, kw]
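+ # (-0.5, 1.0, -0.5) is half the negated discrete second difference:
+ # out[t] = x[t] - 0.5*(x[t-1] + x[t+1]), so the loss penalizes temporal curvature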
70
+ loss_lap = 0
71
+
72
+ out_tensor = F.conv1d(in_tensor, lap_kernel)
73
+ loss_lap += torch.mean(out_tensor**2)
74
+ return loss_lap
75
+
76
+ def cal_vel_loss(ldm):
77
+ # [B, 68, 2]
78
+ vel = ldm[1:] - ldm[:-1]
79
+ return torch.mean(torch.abs(vel))
80
+
81
+ def cal_lan_loss(proj_lan, gt_lan):
82
+ # [B, 68, 2]
83
+ loss = (proj_lan - gt_lan)** 2
84
+ # use the ldm weights from deep3drecon, see deep_3drecon/deep_3drecon_models/losses.py
85
+ weights = torch.ones_like(loss)
87
+ weights[:, 36:48, :] = 3 # eye 12 points
88
+ weights[:, -8:, :] = 3 # inner lip 8 points
89
+ weights[:, 28:31, :] = 3 # nose 3 points
90
+ loss = loss * weights
91
+ return torch.mean(loss)
92
+
93
+ def cal_lan_loss_mp(proj_lan, gt_lan, mean:bool=True):
94
+ # [B, 68, 2]
95
+ loss = (proj_lan - gt_lan).pow(2)
96
+ # loss = (proj_lan - gt_lan).abs()
97
+ unmatch_mask = [ 93, 127, 132, 234, 323, 356, 361, 454]
98
+ upper_eye = [161,160,159,158,157] + [388,387,386,385,384]
99
+ eye = [33,246,161,160,159,158,157,173,133,155,154,153,145,144,163,7] + [263,466,388,387,386,385,384,398,362,382,381,380,374,373,390,249]
100
+ inner_lip = [78,191,80,81,82,13,312,311,310,415,308,324,318,402,317,14,87,178,88,95]
101
+ outer_lip = [61,185,40,39,37,0,267,269,270,409,291,375,321,405,314,17,84,181,91,146]
102
+ weights = torch.ones_like(loss)
103
+ weights[:, eye] = 3
104
+ weights[:, upper_eye] = 20
105
+ weights[:, inner_lip] = 5
106
+ weights[:, outer_lip] = 5
107
+ weights[:, unmatch_mask] = 0
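+ # the index lists above are MediaPipe FaceMesh (468-landmark) vertex indices;
+ # the "unmatch_mask" ids have no reliable BFM correspondence, so they get zero weight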
108
+ loss = loss * weights
109
+ if mean:
110
+ loss = torch.mean(loss)
111
+ return loss
112
+
113
+ def cal_acceleration_loss(trans):
114
+ vel = trans[1:] - trans[:-1]
115
+ acc = vel[1:] - vel[:-1]
116
+ return torch.mean(torch.abs(acc))
117
+
118
+ def cal_acceleration_ldm_loss(ldm):
119
+ # [B, 68, 2]
120
+ vel = ldm[1:] - ldm[:-1]
121
+ acc = vel[1:] - vel[:-1]
122
+ lip_weight = 0.25 # we don't want to smooth the lips too much
123
+ acc[:, 48:68] *= lip_weight # down-weight the 20 lip landmarks (48-67) along the landmark axis
124
+ return torch.mean(torch.abs(acc))
125
+
126
+ def set_requires_grad(tensor_list):
127
+ for tensor in tensor_list:
128
+ tensor.requires_grad = True
129
+
130
+ @torch.enable_grad()
131
+ def fit_3dmm_for_a_video(
132
+ video_name,
133
+ nerf=False, # use the file name convention for GeneFace++
134
+ id_mode='global',
135
+ debug=False,
136
+ keypoint_mode='mediapipe',
137
+ large_yaw_threshold=9999999.9,
138
+ save=True
139
+ ): # returns the fitted coeff dict on success, or False on failure
140
+ assert video_name.endswith(".mp4"), "this function only support video as input"
141
+ if id_mode == 'global':
142
+ LAMBDA_REG_ID = 0.2
143
+ LAMBDA_REG_EXP = 0.6
144
+ LAMBDA_REG_LAP = 1.0
145
+ LAMBDA_REG_VEL_ID = 0.0 # laplacian is all you need for temporal consistency
146
+ LAMBDA_REG_VEL_EXP = 0.0 # laplacian is all you need for temporal consistency
147
+ else:
148
+ LAMBDA_REG_ID = 0.3
149
+ LAMBDA_REG_EXP = 0.05
150
+ LAMBDA_REG_LAP = 1.0
151
+ LAMBDA_REG_VEL_ID = 0.0 # laplacian is all you need for temporal consistency
152
+ LAMBDA_REG_VEL_EXP = 0.0 # laplacian is all you need for temporal consistency
153
+
154
+ frames = read_video_to_frames(video_name) # [T, H, W, 3]
155
+ img_h, img_w = frames.shape[1], frames.shape[2]
156
+ assert img_h == img_w
157
+ num_frames = len(frames)
158
+
159
+ if nerf: # single video
160
+ lm_name = video_name.replace("/raw/", "/processed/").replace(".mp4","/lms_2d.npy")
161
+ else:
162
+ lm_name = video_name.replace("/video/", "/lms_2d/").replace(".mp4", "_lms.npy")
163
+
164
+ if os.path.exists(lm_name):
165
+ lms = np.load(lm_name)
166
+ else:
167
+ print(f"lms_2d file not found, try to extract it from video... {lm_name}")
168
+ try:
169
+ landmarker = MediapipeLandmarker()
170
+ img_lm478, vid_lm478 = landmarker.extract_lm478_from_frames(frames, anti_smooth_factor=20)
171
+ lms = landmarker.combine_vid_img_lm478_to_lm478(img_lm478, vid_lm478)
172
+ except Exception as e:
173
+ print(e)
174
+ return False
175
+ if lms is None:
176
+ print(f"get None lms_2d, please check whether each frame has one head, exiting... {lm_name}")
177
+ return False
178
+ lms = lms[:, :468, :]
179
+ lms = torch.FloatTensor(lms).cuda()
180
+ lms[..., 1] = img_h - lms[..., 1] # flip the height axis
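+ # (image coordinates are y-down while the 3DMM camera space is y-up;
+ # the projections are flipped back the same way before visualization below)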
181
+
182
+ if keypoint_mode == 'mediapipe':
183
+ # default
184
+ cal_lan_loss_fn = cal_lan_loss_mp
185
+ if nerf: # single video
186
+ out_name = video_name.replace("/raw/", "/processed/").replace(".mp4", "/coeff_fit_mp.npy")
187
+ else:
188
+ out_name = video_name.replace("/video/", "/coeff_fit_mp/").replace(".mp4", "_coeff_fit_mp.npy")
189
+ else:
190
+ # lm68 is less accurate than mp
191
+ cal_lan_loss_fn = cal_lan_loss
192
+ if nerf: # single video
193
+ out_name = video_name.replace("/raw/", "/processed/").replace(".mp4", "_coeff_fit_lm68.npy")
194
+ else:
195
+ out_name = video_name.replace("/video/", "/coeff_fit_lm68/").replace(".mp4", "_coeff_fit_lm68.npy")
196
+ try:
197
+ os.makedirs(os.path.dirname(out_name), exist_ok=True)
198
+ except:
199
+ pass
200
+
201
+ id_dim, exp_dim = 80, 64
202
+ sel_ids = np.arange(0, num_frames, 40)
203
+
204
+ h = w = face_model.center * 2
205
+ img_scale_factor = img_h / h
206
+ lms /= img_scale_factor # rescale lms into [0,224]
207
+
208
+ if id_mode == 'global':
209
+ # default choice by GeneFace++ and later works
210
+ id_para = lms.new_zeros((1, id_dim), requires_grad=True)
211
+ elif id_mode == 'finegrained':
212
+ # legacy choice by GeneFace1 (ICLR 2023)
213
+ id_para = lms.new_zeros((num_frames, id_dim), requires_grad=True)
214
+ else: raise NotImplementedError(f"id mode {id_mode} not supported! we only support global or finegrained.")
215
+ exp_para = lms.new_zeros((num_frames, exp_dim), requires_grad=True)
216
+ euler_angle = lms.new_zeros((num_frames, 3), requires_grad=True)
217
+ trans = lms.new_zeros((num_frames, 3), requires_grad=True)
218
+
219
+ set_requires_grad([id_para, exp_para, euler_angle, trans])
220
+
221
+ optimizer_idexp = torch.optim.Adam([id_para, exp_para], lr=.1)
222
+ optimizer_frame = torch.optim.Adam([euler_angle, trans], lr=.1)
223
+
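+ # the fitting runs in three stages:
+ #   (1) 200 steps on pose only (euler, trans) with the landmark loss;
+ #   (2) 200 steps jointly on id/exp/pose with regularization + laplacian smoothing;
+ #   (3) per-batch fine fitting (50 frames, 50 steps, lr=0.005) initialized from (2).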
224
+ # initialize the other parameters: optimize euler and trans first
225
+ for _ in range(200):
226
+ if id_mode == 'global':
227
+ proj_geo = face_model.compute_for_landmark_fit(
228
+ id_para.expand((num_frames, id_dim)), exp_para, euler_angle, trans)
229
+ else:
230
+ proj_geo = face_model.compute_for_landmark_fit(
231
+ id_para, exp_para, euler_angle, trans)
232
+ loss_lan = cal_lan_loss_fn(proj_geo[:, :, :2], lms.detach())
233
+ loss = loss_lan
234
+ optimizer_frame.zero_grad()
235
+ loss.backward()
236
+ optimizer_frame.step()
237
+
238
+ # print(f"loss_lan: {loss_lan.item():.2f}, euler_abs_mean: {euler_angle.abs().mean().item():.4f}, euler_std: {euler_angle.std().item():.4f}, euler_min: {euler_angle.min().item():.4f}, euler_max: {euler_angle.max().item():.4f}")
239
+ # print(f"trans_z_mean: {trans[...,2].mean().item():.4f}, trans_z_std: {trans[...,2].std().item():.4f}, trans_min: {trans[...,2].min().item():.4f}, trans_max: {trans[...,2].max().item():.4f}")
240
+
241
+ for param_group in optimizer_frame.param_groups:
242
+ param_group['lr'] = 0.1
243
+
244
+ # "jointly roughly training id exp euler trans"
245
+ for _ in range(200):
246
+ ret = {}
247
+ if id_mode == 'global':
248
+ proj_geo = face_model.compute_for_landmark_fit(
249
+ id_para.expand((num_frames, id_dim)), exp_para, euler_angle, trans, ret)
250
+ else:
251
+ proj_geo = face_model.compute_for_landmark_fit(
252
+ id_para, exp_para, euler_angle, trans, ret)
253
+ loss_lan = cal_lan_loss_fn(
254
+ proj_geo[:, :, :2], lms.detach())
255
+ # loss_lap = cal_lap_loss(proj_geo)
256
+ # the laplacian loss barely affects euler, but greatly improves trans
257
+ loss_lap = cal_lap_loss(id_para) + cal_lap_loss(exp_para) + cal_lap_loss(euler_angle) * 0.3 + cal_lap_loss(trans) * 0.3
258
+
259
+ loss_regid = torch.mean(id_para*id_para) # regularization
260
+ loss_regexp = torch.mean(exp_para * exp_para)
261
+
262
+ loss_vel_id = cal_vel_loss(id_para)
263
+ loss_vel_exp = cal_vel_loss(exp_para)
264
+ loss = loss_lan + loss_regid * LAMBDA_REG_ID + loss_regexp * LAMBDA_REG_EXP + loss_vel_id * LAMBDA_REG_VEL_ID + loss_vel_exp * LAMBDA_REG_VEL_EXP + loss_lap * LAMBDA_REG_LAP
265
+ optimizer_idexp.zero_grad()
266
+ optimizer_frame.zero_grad()
267
+ loss.backward()
268
+ optimizer_idexp.step()
269
+ optimizer_frame.step()
270
+
271
+ # print(f"loss_lan: {loss_lan.item():.2f}, loss_reg_id: {loss_regid.item():.2f},loss_reg_exp: {loss_regexp.item():.2f},")
272
+ # print(f"euler_abs_mean: {euler_angle.abs().mean().item():.4f}, euler_std: {euler_angle.std().item():.4f}, euler_min: {euler_angle.min().item():.4f}, euler_max: {euler_angle.max().item():.4f}")
273
+ # print(f"trans_z_mean: {trans[...,2].mean().item():.4f}, trans_z_std: {trans[...,2].std().item():.4f}, trans_min: {trans[...,2].min().item():.4f}, trans_max: {trans[...,2].max().item():.4f}")
274
+
275
+ # start fine training, initialized from the roughly trained results
276
+ if id_mode == 'global':
277
+ id_para_ = lms.new_zeros((1, id_dim), requires_grad=False)
278
+ else:
279
+ id_para_ = lms.new_zeros((num_frames, id_dim), requires_grad=True)
280
+ id_para_.data = id_para.data.clone()
281
+ id_para = id_para_
282
+ exp_para_ = lms.new_zeros((num_frames, exp_dim), requires_grad=True)
283
+ exp_para_.data = exp_para.data.clone()
284
+ exp_para = exp_para_
285
+ euler_angle_ = lms.new_zeros((num_frames, 3), requires_grad=True)
286
+ euler_angle_.data = euler_angle.data.clone()
287
+ euler_angle = euler_angle_
288
+ trans_ = lms.new_zeros((num_frames, 3), requires_grad=True)
289
+ trans_.data = trans.data.clone()
290
+ trans = trans_
291
+
292
+ batch_size = 50
293
+ # "fine fitting the 3DMM in batches"
294
+ for i in range(int((num_frames-1)/batch_size+1)):
295
+ if (i+1)*batch_size > num_frames:
296
+ start_n = num_frames-batch_size
297
+ sel_ids = np.arange(max(num_frames-batch_size,0), num_frames)
298
+ else:
299
+ start_n = i*batch_size
300
+ sel_ids = np.arange(i*batch_size, i*batch_size+batch_size)
301
+ sel_lms = lms[sel_ids]
302
+
303
+ if id_mode == 'global':
304
+ sel_id_para = id_para.expand((sel_ids.shape[0], id_dim))
305
+ else:
306
+ sel_id_para = id_para.new_zeros((batch_size, id_dim), requires_grad=True)
307
+ sel_id_para.data = id_para[sel_ids].clone()
308
+ sel_exp_para = exp_para.new_zeros(
309
+ (batch_size, exp_dim), requires_grad=True)
310
+ sel_exp_para.data = exp_para[sel_ids].clone()
311
+ sel_euler_angle = euler_angle.new_zeros(
312
+ (batch_size, 3), requires_grad=True)
313
+ sel_euler_angle.data = euler_angle[sel_ids].clone()
314
+ sel_trans = trans.new_zeros((batch_size, 3), requires_grad=True)
315
+ sel_trans.data = trans[sel_ids].clone()
316
+
317
+ if id_mode == 'global':
318
+ set_requires_grad([sel_exp_para, sel_euler_angle, sel_trans])
319
+ optimizer_cur_batch = torch.optim.Adam(
320
+ [sel_exp_para, sel_euler_angle, sel_trans], lr=0.005)
321
+ else:
322
+ set_requires_grad([sel_id_para, sel_exp_para, sel_euler_angle, sel_trans])
323
+ optimizer_cur_batch = torch.optim.Adam(
324
+ [sel_id_para, sel_exp_para, sel_euler_angle, sel_trans], lr=0.005)
325
+
326
+ for j in range(50):
327
+ ret = {}
328
+ proj_geo = face_model.compute_for_landmark_fit(
329
+ sel_id_para, sel_exp_para, sel_euler_angle, sel_trans, ret)
330
+ loss_lan = cal_lan_loss_fn(
331
+ proj_geo[:, :, :2], lms[sel_ids].detach())
332
+
333
+ # loss_lap = cal_lap_loss(proj_geo)
334
+ loss_lap = cal_lap_loss(sel_id_para) + cal_lap_loss(sel_exp_para) + cal_lap_loss(sel_euler_angle) * 0.3 + cal_lap_loss(sel_trans) * 0.3
335
+ loss_vel_id = cal_vel_loss(sel_id_para)
336
+ loss_vel_exp = cal_vel_loss(sel_exp_para)
337
+ log_dict = {
338
+ 'loss_vel_id': loss_vel_id,
339
+ 'loss_vel_exp': loss_vel_exp,
340
+ 'loss_vel_euler': cal_vel_loss(sel_euler_angle),
341
+ 'loss_vel_trans': cal_vel_loss(sel_trans),
342
+ }
343
+ loss_regid = torch.mean(sel_id_para*sel_id_para) # regularization
344
+ loss_regexp = torch.mean(sel_exp_para*sel_exp_para)
345
+ loss = loss_lan + loss_regid * LAMBDA_REG_ID + loss_regexp * LAMBDA_REG_EXP + loss_lap * LAMBDA_REG_LAP + loss_vel_id * LAMBDA_REG_VEL_ID + loss_vel_exp * LAMBDA_REG_VEL_EXP
346
+
347
+ optimizer_cur_batch.zero_grad()
348
+ loss.backward()
349
+ optimizer_cur_batch.step()
350
+
351
+ if debug:
352
+ print(f"batch {i} | loss_lan: {loss_lan.item():.2f}, loss_reg_id: {loss_regid.item():.2f},loss_reg_exp: {loss_regexp.item():.2f},loss_lap_ldm:{loss_lap.item():.4f}")
353
+ print("|--------" + ', '.join([f"{k}: {v:.4f}" for k,v in log_dict.items()]))
354
+ if id_mode != 'global':
355
+ id_para[sel_ids].data = sel_id_para.data.clone()
356
+ exp_para[sel_ids].data = sel_exp_para.data.clone()
357
+ euler_angle[sel_ids].data = sel_euler_angle.data.clone()
358
+ trans[sel_ids].data = sel_trans.data.clone()
359
+
360
+ coeff_dict = {'id': id_para.detach().cpu().numpy(), 'exp': exp_para.detach().cpu().numpy(),
361
+ 'euler': euler_angle.detach().cpu().numpy(), 'trans': trans.detach().cpu().numpy()}
362
+
363
+ # filter data by side-view pose
364
+ # bad_yaw = False
365
+ # yaws = [] # not so accurate
366
+ # for index in range(coeff_dict["trans"].shape[0]):
367
+ # yaw = coeff_dict["euler"][index][1]
368
+ # yaw = np.abs(yaw)
369
+ # yaws.append(yaw)
370
+ # if yaw > large_yaw_threshold:
371
+ # bad_yaw = True
372
+
373
+ if debug:
374
+ import imageio
375
+ from utils.visualization.vis_cam3d.camera_pose_visualizer import CameraPoseVisualizer
376
+ from data_util.face3d_helper import Face3DHelper
377
+ from data_gen.utils.process_video.extract_blink import get_eye_area_percent
378
+ face3d_helper = Face3DHelper('deep_3drecon/BFM', keypoint_mode='mediapipe')
379
+
380
+ t = coeff_dict['exp'].shape[0]
381
+ if len(coeff_dict['id']) == 1:
382
+ coeff_dict['id'] = np.repeat(coeff_dict['id'], t, axis=0)
383
+ idexp_lm3d = face3d_helper.reconstruct_idexp_lm3d_np(coeff_dict['id'], coeff_dict['exp']).reshape([t, -1])
384
+ cano_lm3d = idexp_lm3d / 10 + face3d_helper.key_mean_shape.squeeze().reshape([1, -1]).cpu().numpy()
385
+ cano_lm3d = cano_lm3d.reshape([t, -1, 3])
386
+ WH = 512
387
+ cano_lm3d = (cano_lm3d * WH/2 + WH/2).astype(int)
388
+
389
+ with torch.no_grad():
390
+ rot = face_model.compute_rotation(euler_angle)
391
+ extrinsic = torch.zeros([rot.shape[0], 4, 4]).to(rot.device)
392
+ extrinsic[:, :3,:3] = rot
393
+ extrinsic[:, :3, 3] = trans # / 10
394
+ extrinsic[:, 3, 3] = 1
395
+ extrinsic = extrinsic.cpu().numpy()
396
+
397
+ xy_camera_visualizer = CameraPoseVisualizer(xlim=[extrinsic[:,0,3].min().item()-0.5,extrinsic[:,0,3].max().item()+0.5],ylim=[extrinsic[:,1,3].min().item()-0.5,extrinsic[:,1,3].max().item()+0.5], zlim=[extrinsic[:,2,3].min().item()-0.5,extrinsic[:,2,3].max().item()+0.5], view_mode='xy')
398
+ xz_camera_visualizer = CameraPoseVisualizer(xlim=[extrinsic[:,0,3].min().item()-0.5,extrinsic[:,0,3].max().item()+0.5],ylim=[extrinsic[:,1,3].min().item()-0.5,extrinsic[:,1,3].max().item()+0.5], zlim=[extrinsic[:,2,3].min().item()-0.5,extrinsic[:,2,3].max().item()+0.5], view_mode='xz')
399
+
400
+ if nerf:
401
+ debug_name = video_name.replace("/raw/", "/processed/").replace(".mp4", "/debug_fit_3dmm.mp4")
402
+ else:
403
+ debug_name = video_name.replace("/video/", "/coeff_fit_debug/").replace(".mp4", "_debug.mp4")
404
+ try:
405
+ os.makedirs(os.path.dirname(debug_name), exist_ok=True)
406
+ except: pass
407
+ writer = imageio.get_writer(debug_name, fps=25)
408
+ if id_mode == 'global':
409
+ id_para = id_para.repeat([exp_para.shape[0], 1])
410
+ proj_geo = face_model.compute_for_landmark_fit(id_para, exp_para, euler_angle, trans)
411
+ lm68s = proj_geo[:,:,:2].detach().cpu().numpy() # [T, 68,2]
412
+ lm68s = lm68s * img_scale_factor
413
+ lms = lms * img_scale_factor
414
+ lm68s[..., 1] = img_h - lm68s[..., 1] # flip the height axis
415
+ lms[..., 1] = img_h - lms[..., 1] # flip the height axis
416
+ lm68s = lm68s.astype(int)
417
+ for i in tqdm.trange(min(250, len(frames)), desc=f'rendering debug video to {debug_name}..'):
418
+ xy_cam3d_img = xy_camera_visualizer.extrinsic2pyramid(extrinsic[i], focal_len_scaled=0.25)
419
+ xy_cam3d_img = cv2.resize(xy_cam3d_img, (512,512))
420
+ xz_cam3d_img = xz_camera_visualizer.extrinsic2pyramid(extrinsic[i], focal_len_scaled=0.25)
421
+ xz_cam3d_img = cv2.resize(xz_cam3d_img, (512,512))
422
+
423
+ img = copy.deepcopy(frames[i])
424
+ img2 = copy.deepcopy(frames[i])
425
+
426
+ img = draw_axes(img, euler_angle[i,0].item(), euler_angle[i,1].item(), euler_angle[i,2].item(), lm68s[i][4][0].item(), lm68s[i, 4][1].item(), size=50)
427
+
428
+ gt_lm_color = (255, 0, 0)
429
+
430
+ for lm in lm68s[i]:
431
+ img = cv2.circle(img, lm, 1, (0, 0, 255), thickness=-1) # blue
432
+ for gt_lm in lms[i]:
433
+ img2 = cv2.circle(img2, gt_lm.cpu().numpy().astype(int), 2, gt_lm_color, thickness=1)
434
+
435
+ cano_lm3d_img = np.ones([WH, WH, 3], dtype=np.uint8) * 255
436
+ for j in range(len(cano_lm3d[i])):
437
+ x, y, _ = cano_lm3d[i, j]
438
+ color = (255,0,0)
439
+ cano_lm3d_img = cv2.circle(cano_lm3d_img, center=(x,y), radius=3, color=color, thickness=-1)
440
+ cano_lm3d_img = cv2.flip(cano_lm3d_img, 0)
441
+
442
+ _, secc_img = secc_renderer(id_para[0:1], exp_para[i:i+1], euler_angle[i:i+1]*0, trans[i:i+1]*0)
443
+ secc_img = (secc_img +1)*127.5
444
+ secc_img = F.interpolate(secc_img, size=(img_h, img_w))
445
+ secc_img = secc_img.permute(0, 2,3,1).int().cpu().numpy()[0]
446
+ out_img1 = np.concatenate([img, img2, secc_img], axis=1).astype(np.uint8)
447
+ font = cv2.FONT_HERSHEY_SIMPLEX
448
+ out_img2 = np.concatenate([xy_cam3d_img, xz_cam3d_img, cano_lm3d_img], axis=1).astype(np.uint8)
449
+ out_img = np.concatenate([out_img1, out_img2], axis=0)
450
+ writer.append_data(out_img)
451
+ writer.close()
452
+
453
+ # if bad_yaw:
454
+ # print(f"Skip {video_name} due to TOO LARGE YAW")
455
+ # return False
456
+
457
+ if save:
458
+ np.save(out_name, coeff_dict, allow_pickle=True)
459
+ return coeff_dict
460
+
461
+ def out_exist_job(vid_name):
462
+ out_name = vid_name.replace("/video/", "/coeff_fit_mp/").replace(".mp4","_coeff_fit_mp.npy")
463
+ lms_name = vid_name.replace("/video/", "/lms_2d/").replace(".mp4","_lms.npy")
464
+ if os.path.exists(out_name) or not os.path.exists(lms_name):
465
+ return None
466
+ else:
467
+ return vid_name
468
+
469
+ def get_todo_vid_names(vid_names):
470
+ if len(vid_names) == 1: # single video, nerf
471
+ return vid_names
472
+ todo_vid_names = []
473
+ for i, res in multiprocess_run_tqdm(out_exist_job, vid_names, num_workers=16):
474
+ if res is not None:
475
+ todo_vid_names.append(res)
476
+ return todo_vid_names
477
+
478
+
479
+ if __name__ == '__main__':
480
+ import argparse, glob, tqdm
481
+ parser = argparse.ArgumentParser()
482
+ # parser.add_argument("--vid_dir", default='/home/tiger/datasets/raw/CelebV-HQ/video')
483
+ parser.add_argument("--vid_dir", default='data/raw/videos/May_10s.mp4')
484
+ parser.add_argument("--ds_name", default='nerf') # 'nerf' | 'CelebV-HQ' | 'TH1KH_512' | etc
485
+ parser.add_argument("--seed", default=0, type=int)
486
+ parser.add_argument("--process_id", default=0, type=int)
487
+ parser.add_argument("--total_process", default=1, type=int)
488
+ parser.add_argument("--id_mode", default='global', type=str) # global | finegrained
489
+ parser.add_argument("--keypoint_mode", default='mediapipe', type=str)
490
+ parser.add_argument("--large_yaw_threshold", default=9999999.9, type=float) # could be 0.7
491
+ parser.add_argument("--debug", action='store_true')
492
+ parser.add_argument("--reset", action='store_true')
493
+ parser.add_argument("--load_names", action="store_true")
494
+
495
+ args = parser.parse_args()
496
+ vid_dir = args.vid_dir
497
+ ds_name = args.ds_name
498
+ load_names = args.load_names
499
+
500
+ print(f"args {args}")
501
+
502
+ if ds_name.lower() == 'nerf': # process a single video
503
+ vid_names = [vid_dir]
504
+ out_names = [video_name.replace("/raw/", "/processed/").replace(".mp4","_coeff_fit_mp.npy") for video_name in vid_names]
505
+ else: # process the whole dataset
506
+ if ds_name in ['lrs3_trainval']:
507
+ vid_name_pattern = os.path.join(vid_dir, "*/*.mp4")
508
+ elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
509
+ vid_name_pattern = os.path.join(vid_dir, "*.mp4")
510
+ elif ds_name in ['lrs2', 'lrs3', 'voxceleb2', 'CMLR']:
511
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
512
+ elif ds_name in ["RAVDESS", 'VFHQ']:
513
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
514
+ else:
515
+ raise NotImplementedError()
516
+
517
+ vid_names_path = os.path.join(vid_dir, "vid_names.pkl")
518
+ if os.path.exists(vid_names_path) and load_names:
519
+ print(f"loading vid names from {vid_names_path}")
520
+ vid_names = load_file(vid_names_path)
521
+ else:
522
+ vid_names = multiprocess_glob(vid_name_pattern)
523
+ vid_names = sorted(vid_names)
524
+ print(f"saving vid names to {vid_names_path}")
525
+ save_file(vid_names_path, vid_names)
526
+ out_names = [video_name.replace("/video/", "/coeff_fit_mp/").replace(".mp4","_coeff_fit_mp.npy") for video_name in vid_names]
527
+
528
+ print(vid_names[:10])
529
+ random.seed(args.seed)
530
+ random.shuffle(vid_names)
531
+
532
+ face_model = ParametricFaceModel(bfm_folder='deep_3drecon/BFM',
533
+ camera_distance=10, focal=1015, keypoint_mode=args.keypoint_mode)
534
+ face_model.to(torch.device("cuda:0"))
535
+ secc_renderer = SECC_Renderer(512)
536
+ secc_renderer.to("cuda:0")
537
+
538
+ process_id = args.process_id
539
+ total_process = args.total_process
540
+ if total_process > 1:
541
+ assert process_id <= total_process -1
542
+ num_samples_per_process = len(vid_names) // total_process
543
+ if process_id == total_process - 1: # the last process takes the remainder
544
+ vid_names = vid_names[process_id * num_samples_per_process : ]
545
+ else:
546
+ vid_names = vid_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
547
+
548
+ if not args.reset:
549
+ vid_names = get_todo_vid_names(vid_names)
550
+
551
+ failed_img_names = []
552
+ for i in tqdm.trange(len(vid_names), desc=f"process {process_id}: fitting 3dmm ..."):
553
+ img_name = vid_names[i]
554
+ try:
555
+ is_person_specific_data = ds_name=='nerf'
556
+ success = fit_3dmm_for_a_video(img_name, is_person_specific_data, args.id_mode, args.debug, large_yaw_threshold=args.large_yaw_threshold)
557
+ if not success:
558
+ failed_img_names.append(img_name)
559
+ except Exception as e:
560
+ print(img_name, e)
561
+ failed_img_names.append(img_name)
562
+ print(f"finished {i + 1} / {len(vid_names)} = {(i + 1) / len(vid_names):.4f}, failed {len(failed_img_names)} / {i + 1} = {len(failed_img_names) / (i + 1):.4f}")
563
+ sys.stdout.flush()
564
+ print(f"all failed image names: {failed_img_names}")
565
+ print(f"All finished!")
data_gen/utils/process_video/inpaint_torso_imgs.py ADDED
@@ -0,0 +1,193 @@
1
+ import cv2
+ import glob
+ import tqdm
2
+ import os
3
+ import numpy as np
4
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
5
+ from scipy.ndimage import binary_erosion, binary_dilation
6
+
7
+ from tasks.eg3ds.loss_utils.segment_loss.mp_segmenter import MediapipeSegmenter
8
+ seg_model = MediapipeSegmenter()
9
+
10
+ def inpaint_torso_job(video_name, idx=None, total=None):
11
+ raw_img_dir = video_name.replace(".mp4", "").replace("/video/","/gt_imgs/")
12
+ img_names = glob.glob(os.path.join(raw_img_dir, "*.jpg"))
13
+
14
+ for image_path in tqdm.tqdm(img_names):
15
+ # read ori image
16
+ ori_image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) # [H, W, 3]
17
+ segmap = seg_model._cal_seg_map(cv2.cvtColor(ori_image, cv2.COLOR_BGR2RGB))
18
+ head_part = (segmap[1] + segmap[3] + segmap[5]).astype(bool)
19
+ torso_part = (segmap[4]).astype(bool)
20
+ neck_part = (segmap[2]).astype(bool)
21
+ bg_part = segmap[0].astype(bool)
22
+ head_image = cv2.imread(image_path.replace("/gt_imgs/", "/head_imgs/"), cv2.IMREAD_UNCHANGED) # [H, W, 3]
23
+ torso_image = cv2.imread(image_path.replace("/gt_imgs/", "/torso_imgs/"), cv2.IMREAD_UNCHANGED) # [H, W, 3]
24
+ bg_image = cv2.imread(image_path.replace("/gt_imgs/", "/bg_imgs/"), cv2.IMREAD_UNCHANGED) # [H, W, 3]
25
+
26
+ # head_part = (head_image[...,0] != 0) & (head_image[...,1] != 0) & (head_image[...,2] != 0)
27
+ # torso_part = (torso_image[...,0] != 0) & (torso_image[...,1] != 0) & (torso_image[...,2] != 0)
28
+ # bg_part = (bg_image[...,0] != 0) & (bg_image[...,1] != 0) & (bg_image[...,2] != 0)
29
+
30
+ # get gt image
31
+ gt_image = ori_image.copy()
32
+ gt_image[bg_part] = bg_image[bg_part]
33
+ cv2.imwrite(image_path.replace('ori_imgs', 'gt_imgs'), gt_image)
34
+
35
+ # get torso image
36
+ torso_image = gt_image.copy() # rgb
37
+ torso_image[head_part] = 0
38
+ torso_alpha = 255 * np.ones((gt_image.shape[0], gt_image.shape[1], 1), dtype=np.uint8) # alpha
39
+
40
+ # torso part "vertical" in-painting...
41
+ L = 8 + 1
42
+ torso_coords = np.stack(np.nonzero(torso_part), axis=-1) # [M, 2]
43
+ # lexsort: sort 2D coords first by y then by x,
44
+ # ref: https://stackoverflow.com/questions/2706605/sorting-a-2d-numpy-array-by-multiple-axes
45
+ inds = np.lexsort((torso_coords[:, 0], torso_coords[:, 1]))
46
+ torso_coords = torso_coords[inds]
47
+ # choose the top pixel for each column
48
+ u, uid, ucnt = np.unique(torso_coords[:, 1], return_index=True, return_counts=True)
49
+ top_torso_coords = torso_coords[uid] # [m, 2]
50
+ # only keep top-is-head pixels
51
+ top_torso_coords_up = top_torso_coords.copy() - np.array([1, 0]) # [N, 2]
52
+ mask = head_part[tuple(top_torso_coords_up.T)]
53
+ if mask.any():
54
+ top_torso_coords = top_torso_coords[mask]
55
+ # get the color
56
+ top_torso_colors = gt_image[tuple(top_torso_coords.T)] # [m, 3]
57
+ # construct inpaint coords (vertically up, or minus in x)
58
+ inpaint_torso_coords = top_torso_coords[None].repeat(L, 0) # [L, m, 2]
59
+ inpaint_offsets = np.stack([-np.arange(L), np.zeros(L, dtype=np.int32)], axis=-1)[:, None] # [L, 1, 2]
60
+ inpaint_torso_coords += inpaint_offsets
61
+ inpaint_torso_coords = inpaint_torso_coords.reshape(-1, 2) # [Lm, 2]
62
+ inpaint_torso_colors = top_torso_colors[None].repeat(L, 0) # [L, m, 3]
63
+ darken_scaler = 0.98 ** np.arange(L).reshape(L, 1, 1) # [L, 1, 1]
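+ # each selected column is filled upward for L pixels with its top torso color,
+ # darkened by 0.98 per pixel, to fake the region the removed head used to cover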
64
+ inpaint_torso_colors = (inpaint_torso_colors * darken_scaler).reshape(-1, 3) # [Lm, 3]
65
+ # set color
66
+ torso_image[tuple(inpaint_torso_coords.T)] = inpaint_torso_colors
67
+
68
+ inpaint_torso_mask = np.zeros_like(torso_image[..., 0]).astype(bool)
69
+ inpaint_torso_mask[tuple(inpaint_torso_coords.T)] = True
70
+ else:
71
+ inpaint_torso_mask = None
72
+
73
+ # neck part "vertical" in-painting...
74
+ push_down = 4
75
+ L = 48 + push_down + 1
76
+
77
+ neck_part = binary_dilation(neck_part, structure=np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=bool), iterations=3)
78
+
79
+ neck_coords = np.stack(np.nonzero(neck_part), axis=-1) # [M, 2]
80
+ # lexsort: sort 2D coords first by y then by x,
81
+ # ref: https://stackoverflow.com/questions/2706605/sorting-a-2d-numpy-array-by-multiple-axes
82
+ inds = np.lexsort((neck_coords[:, 0], neck_coords[:, 1]))
83
+ neck_coords = neck_coords[inds]
84
+ # choose the top pixel for each column
85
+ u, uid, ucnt = np.unique(neck_coords[:, 1], return_index=True, return_counts=True)
86
+ top_neck_coords = neck_coords[uid] # [m, 2]
87
+ # only keep top-is-head pixels
88
+ top_neck_coords_up = top_neck_coords.copy() - np.array([1, 0])
89
+ mask = head_part[tuple(top_neck_coords_up.T)]
90
+
91
+ top_neck_coords = top_neck_coords[mask]
92
+ # push these top points down by 4 pixels to make the neck inpainting more natural...
93
+ offset_down = np.minimum(ucnt[mask] - 1, push_down)
94
+ top_neck_coords += np.stack([offset_down, np.zeros_like(offset_down)], axis=-1)
95
+ # get the color
96
+ top_neck_colors = gt_image[tuple(top_neck_coords.T)] # [m, 3]
97
+ # construct inpaint coords (vertically up, or minus in x)
98
+ inpaint_neck_coords = top_neck_coords[None].repeat(L, 0) # [L, m, 2]
99
+ inpaint_offsets = np.stack([-np.arange(L), np.zeros(L, dtype=np.int32)], axis=-1)[:, None] # [L, 1, 2]
100
+ inpaint_neck_coords += inpaint_offsets
101
+ inpaint_neck_coords = inpaint_neck_coords.reshape(-1, 2) # [Lm, 2]
102
+ inpaint_neck_colors = top_neck_colors[None].repeat(L, 0) # [L, m, 3]
103
+ darken_scaler = 0.98 ** np.arange(L).reshape(L, 1, 1) # [L, 1, 1]
104
+ inpaint_neck_colors = (inpaint_neck_colors * darken_scaler).reshape(-1, 3) # [Lm, 3]
105
+ # set color
106
+ torso_image[tuple(inpaint_neck_coords.T)] = inpaint_neck_colors
107
+
108
+ # apply blurring to the inpaint area to avoid vertical-line artifacts...
109
+ inpaint_mask = np.zeros_like(torso_image[..., 0]).astype(bool)
110
+ inpaint_mask[tuple(inpaint_neck_coords.T)] = True
111
+
112
+ blur_img = torso_image.copy()
113
+ blur_img = cv2.GaussianBlur(blur_img, (5, 5), cv2.BORDER_DEFAULT)
114
+
115
+ torso_image[inpaint_mask] = blur_img[inpaint_mask]
116
+
117
+ # set mask
118
+ mask = (neck_part | torso_part | inpaint_mask)
119
+ if inpaint_torso_mask is not None:
120
+ mask = mask | inpaint_torso_mask
121
+ torso_image[~mask] = 0
122
+ torso_alpha[~mask] = 0
123
+
124
+ cv2.imwrite("0.png", np.concatenate([torso_image, torso_alpha], axis=-1))
125
+
126
+ print(f'[INFO] ===== extracted torso and gt images =====')
127
+
128
+
129
+ def out_exist_job(vid_name):
130
+ out_dir1 = vid_name.replace("/video/", "/inpaint_torso_imgs/").replace(".mp4","")
131
+ out_dir2 = vid_name.replace("/video/", "/inpaint_torso_with_bg_imgs/").replace(".mp4","")
132
+ out_dir3 = vid_name.replace("/video/", "/torso_imgs/").replace(".mp4","")
133
+ out_dir4 = vid_name.replace("/video/", "/torso_with_bg_imgs/").replace(".mp4","")
134
+
135
+ if os.path.exists(out_dir1) and os.path.exists(out_dir2) and os.path.exists(out_dir3) and os.path.exists(out_dir4):
136
+ num_frames = len(os.listdir(out_dir1))
137
+ if len(os.listdir(out_dir2)) == num_frames and len(os.listdir(out_dir3)) == num_frames and len(os.listdir(out_dir4)) == num_frames:
138
+ return None
139
+ else:
140
+ return vid_name
141
+ else:
142
+ return vid_name
143
+
144
+ def get_todo_vid_names(vid_names):
145
+ todo_vid_names = []
146
+ for i, res in multiprocess_run_tqdm(out_exist_job, vid_names, num_workers=16):
147
+ if res is not None:
148
+ todo_vid_names.append(res)
149
+ return todo_vid_names
150
+
151
+ if __name__ == '__main__':
152
+ import argparse, glob, tqdm, random
153
+ parser = argparse.ArgumentParser()
154
+ parser.add_argument("--vid_dir", default='/home/tiger/datasets/raw/CelebV-HQ/video')
155
+ parser.add_argument("--ds_name", default='CelebV-HQ')
156
+ parser.add_argument("--num_workers", default=48, type=int)
157
+ parser.add_argument("--seed", default=0, type=int)
158
+ parser.add_argument("--process_id", default=0, type=int)
159
+ parser.add_argument("--total_process", default=1, type=int)
160
+ parser.add_argument("--reset", action='store_true')
161
+
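+ # note: only this single-video debug call is active; the batch-processing
+ # entry point below is left commented out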
162
+ inpaint_torso_job('/home/tiger/datasets/raw/CelebV-HQ/video/dgdEr-mXQT4_8.mp4')
163
+ # args = parser.parse_args()
164
+ # vid_dir = args.vid_dir
165
+ # ds_name = args.ds_name
166
+ # if ds_name in ['lrs3_trainval']:
167
+ # mp4_name_pattern = os.path.join(vid_dir, "*/*.mp4")
168
+ # if ds_name in ['TH1KH_512', 'CelebV-HQ']:
169
+ # vid_names = glob.glob(os.path.join(vid_dir, "*.mp4"))
170
+ # elif ds_name in ['lrs2', 'lrs3', 'voxceleb2']:
171
+ # vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
172
+ # vid_names = glob.glob(vid_name_pattern)
173
+ # vid_names = sorted(vid_names)
174
+ # random.seed(args.seed)
175
+ # random.shuffle(vid_names)
176
+
177
+ # process_id = args.process_id
178
+ # total_process = args.total_process
179
+ # if total_process > 1:
180
+ # assert process_id <= total_process -1
181
+ # num_samples_per_process = len(vid_names) // total_process
182
+ # if process_id == total_process:
183
+ # vid_names = vid_names[process_id * num_samples_per_process : ]
184
+ # else:
185
+ # vid_names = vid_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
186
+
187
+ # if not args.reset:
188
+ # vid_names = get_todo_vid_names(vid_names)
189
+ # print(f"todo videos number: {len(vid_names)}")
190
+
191
+ # fn_args = [(vid_name,i,len(vid_names)) for i, vid_name in enumerate(vid_names)]
192
+ # for vid_name in multiprocess_run_tqdm(inpaint_torso_job ,fn_args, desc=f"Root process {args.process_id}: extracting segment images", num_workers=args.num_workers):
193
+ # pass
data_gen/utils/process_video/resample_video_to_25fps_resize_to_512.py ADDED
@@ -0,0 +1,87 @@
1
+ import os, glob
2
+ import cv2
3
+ from utils.commons.os_utils import multiprocess_glob
4
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
5
+
6
+ def get_video_infos(video_path):
7
+ vid_cap = cv2.VideoCapture(video_path)
8
+ height = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
9
+ width = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
10
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
11
+ total_frames = int(vid_cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ vid_cap.release()
12
+ return {'height': height, 'width': width, 'fps': fps, 'total_frames': total_frames}
13
+
14
+ def extract_img_job(video_name:str):
15
+ out_path = video_name.replace("/video_raw/","/video/",1)
16
+ os.makedirs(os.path.dirname(out_path), exist_ok=True)
17
+ ffmpeg_path = "/usr/bin/ffmpeg"
18
+ vid_info = get_video_infos(video_name)
19
+ assert vid_info['width'] == vid_info['height']
20
+ cmd = f'{ffmpeg_path} -i {video_name} -vf fps={25},scale=w=512:h=512 -q:v 1 -c:v libx264 -pix_fmt yuv420p -b:v 2000k -v quiet -y {out_path}'
21
+ os.system(cmd)
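+ # for an illustrative input /data/video_raw/foo.mp4 the command expands to:
+ # /usr/bin/ffmpeg -i /data/video_raw/foo.mp4 -vf fps=25,scale=w=512:h=512 \
+ #   -q:v 1 -c:v libx264 -pix_fmt yuv420p -b:v 2000k -v quiet -y /data/video/foo.mp4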
22
+
23
+ def extract_img_job_crop(video_name:str):
24
+ out_path = video_name.replace("/video_raw/","/video/",1)
25
+ os.makedirs(os.path.dirname(out_path), exist_ok=True)
26
+ ffmpeg_path = "/usr/bin/ffmpeg"
27
+ vid_info = get_video_infos(video_name)
28
+ wh = min(vid_info['width'], vid_info['height'])
29
+ cmd = f'{ffmpeg_path} -i {video_name} -vf fps={25},crop={wh}:{wh},scale=w=512:h=512 -q:v 1 -c:v libx264 -pix_fmt yuv420p -b:v 2000k -v quiet -y {out_path}'
30
+ os.system(cmd)
31
+
32
+ def extract_img_job_crop_ravdess(video_name:str):
33
+ out_path = video_name.replace("/video_raw/","/video/",1)
34
+ os.makedirs(os.path.dirname(out_path), exist_ok=True)
35
+ ffmpeg_path = "/usr/bin/ffmpeg"
36
+ cmd = f'{ffmpeg_path} -i {video_name} -vf fps={25},crop=720:720,scale=w=512:h=512 -q:v 1 -c:v libx264 -pix_fmt yuv420p -b:v 2000k -v quiet -y {out_path}'
37
+ os.system(cmd)
38
+
39
+ if __name__ == '__main__':
40
+ import argparse, glob, tqdm, random
41
+ parser = argparse.ArgumentParser()
42
+ parser.add_argument("--vid_dir", default='/home/tiger/datasets/raw/CelebV-HQ/video_raw/')
43
+ parser.add_argument("--ds_name", default='CelebV-HQ')
44
+ parser.add_argument("--num_workers", default=32, type=int)
45
+ parser.add_argument("--process_id", default=0, type=int)
46
+ parser.add_argument("--total_process", default=1, type=int)
47
+ args = parser.parse_args()
48
+ print(f"args {args}")
49
+
50
+ vid_dir = args.vid_dir
51
+ ds_name = args.ds_name
52
+ if ds_name in ['lrs3_trainval']:
53
+ vid_names = multiprocess_glob(os.path.join(vid_dir, "*/*.mp4"))
54
+ elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
55
+ vid_names = multiprocess_glob(os.path.join(vid_dir, "*.mp4"))
56
+ elif ds_name in ['lrs2', 'lrs3', 'voxceleb2', 'CMLR']:
57
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
58
+ vid_names = multiprocess_glob(vid_name_pattern)
59
+ elif ds_name in ["RAVDESS", 'VFHQ']:
60
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
61
+ vid_names = multiprocess_glob(vid_name_pattern)
62
+ else:
63
+ raise NotImplementedError()
64
+ vid_names = sorted(vid_names)
65
+ print(f"total video number : {len(vid_names)}")
66
+ print(f"first {vid_names[0]} last {vid_names[-1]}")
67
+ # exit()
68
+ process_id = args.process_id
69
+ total_process = args.total_process
70
+ if total_process > 1:
71
+ assert process_id <= total_process -1
72
+ num_samples_per_process = len(vid_names) // total_process
73
+ if process_id == total_process - 1: # the last process takes the remainder
74
+ vid_names = vid_names[process_id * num_samples_per_process : ]
75
+ else:
76
+ vid_names = vid_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
77
+
78
+ if ds_name == "RAVDESS":
79
+ for i, res in multiprocess_run_tqdm(extract_img_job_crop_ravdess, vid_names, num_workers=args.num_workers, desc="resampling videos"):
80
+ pass
81
+ elif ds_name == "CMLR":
82
+ for i, res in multiprocess_run_tqdm(extract_img_job_crop, vid_names, num_workers=args.num_workers, desc="resampling videos"):
83
+ pass
84
+ else:
85
+ for i, res in multiprocess_run_tqdm(extract_img_job, vid_names, num_workers=args.num_workers, desc="resampling videos"):
86
+ pass
87
+
data_gen/utils/process_video/split_video_to_imgs.py ADDED
@@ -0,0 +1,53 @@
1
+ import os, glob
2
+ from utils.commons.multiprocess_utils import multiprocess_run_tqdm
3
+
4
+ from data_gen.utils.path_converter import PathConverter, pc
5
+
6
+ # mp4_names = glob.glob("/home/tiger/datasets/raw/CelebV-HQ/video/*.mp4")
7
+
8
+ def extract_img_job(video_name, raw_img_dir=None):
9
+ if raw_img_dir is not None:
10
+ out_path = raw_img_dir
11
+ else:
12
+ out_path = pc.to(video_name.replace(".mp4", ""), "vid", "gt")
13
+ os.makedirs(out_path, exist_ok=True)
14
+ ffmpeg_path = "/usr/bin/ffmpeg"
15
+ cmd = f'{ffmpeg_path} -i {video_name} -vf fps={25},scale=w=512:h=512 -qmin 1 -q:v 1 -start_number 0 -v quiet {os.path.join(out_path, "%8d.jpg")}'
16
+ os.system(cmd)
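+ # this splits the video into 25 fps, 512x512 jpg frames named by an 8-wide
+ # frame index starting at 0, which the other process_video scripts glob for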
17
+
18
+ if __name__ == '__main__':
19
+ import argparse, glob, tqdm, random
20
+ parser = argparse.ArgumentParser()
21
+ parser.add_argument("--vid_dir", default='/home/tiger/datasets/raw/CelebV-HQ/video')
22
+ parser.add_argument("--ds_name", default='CelebV-HQ')
23
+ parser.add_argument("--num_workers", default=64, type=int)
24
+ parser.add_argument("--process_id", default=0, type=int)
25
+ parser.add_argument("--total_process", default=1, type=int)
26
+ args = parser.parse_args()
27
+ vid_dir = args.vid_dir
28
+ ds_name = args.ds_name
29
+ if ds_name in ['lrs3_trainval']:
30
+ vid_names = glob.glob(os.path.join(vid_dir, "*/*.mp4"))
31
+ elif ds_name in ['TH1KH_512', 'CelebV-HQ']:
32
+ vid_names = glob.glob(os.path.join(vid_dir, "*.mp4"))
33
+ elif ds_name in ['lrs2', 'lrs3', 'voxceleb2']:
34
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*.mp4")
35
+ vid_names = glob.glob(vid_name_pattern)
36
+ elif ds_name in ["RAVDESS", 'VFHQ']:
37
+ vid_name_pattern = os.path.join(vid_dir, "*/*/*/*.mp4")
38
+ vid_names = glob.glob(vid_name_pattern)
39
+ vid_names = sorted(vid_names)
40
+
41
+ process_id = args.process_id
42
+ total_process = args.total_process
43
+ if total_process > 1:
44
+ assert process_id <= total_process -1
45
+ num_samples_per_process = len(vid_names) // total_process
46
+ if process_id == total_process - 1: # the last process takes the remainder
47
+ vid_names = vid_names[process_id * num_samples_per_process : ]
48
+ else:
49
+ vid_names = vid_names[process_id * num_samples_per_process : (process_id+1) * num_samples_per_process]
50
+
51
+ for i, res in multiprocess_run_tqdm(extract_img_job, vid_names, num_workers=args.num_workers, desc="extracting images"):
52
+ pass
53
+
data_util/face3d_helper.py ADDED
@@ -0,0 +1,309 @@
1
+ import os
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn as nn
5
+ from scipy.io import loadmat
6
+
7
+ from deep_3drecon.deep_3drecon_models.bfm import perspective_projection
8
+
9
+
10
+ class Face3DHelper(nn.Module):
11
+ def __init__(self, bfm_dir='deep_3drecon/BFM', keypoint_mode='lm68', use_gpu=True):
12
+ super().__init__()
13
+ self.keypoint_mode = keypoint_mode # lm68 | mediapipe
14
+ self.bfm_dir = bfm_dir
15
+ self.load_3dmm()
16
+ if use_gpu: self.to("cuda")
17
+
18
+ def load_3dmm(self):
19
+ model = loadmat(os.path.join(self.bfm_dir, "BFM_model_front.mat"))
20
+ self.register_buffer('mean_shape',torch.from_numpy(model['meanshape'].transpose()).float()) # mean face shape. [3*N, 1], N=35709, xyz=3, ==> 3*N=107127
21
+ mean_shape = self.mean_shape.reshape([-1, 3])
22
+ # re-center
23
+ mean_shape = mean_shape - torch.mean(mean_shape, dim=0, keepdim=True)
24
+ self.mean_shape = mean_shape.reshape([-1, 1])
25
+ self.register_buffer('id_base',torch.from_numpy(model['idBase']).float()) # identity basis. [3*N,80], we have 80 eigen faces for identity
26
+ self.register_buffer('exp_base',torch.from_numpy(model['exBase']).float()) # expression basis. [3*N,64], we have 64 eigen faces for expression
27
+
28
+ self.register_buffer('mean_texure',torch.from_numpy(model['meantex'].transpose()).float()) # mean face texture. [3*N,1] (0-255)
29
+ self.register_buffer('tex_base',torch.from_numpy(model['texBase']).float()) # texture basis. [3*N,80], rgb=3
30
+
31
+ self.register_buffer('point_buf',torch.from_numpy(model['point_buf']).float()) # triangle indices for each vertex that lies in. starts from 1. [N,8] (1-F)
32
+ self.register_buffer('face_buf',torch.from_numpy(model['tri']).float()) # vertex indices in each triangle. starts from 1. [F,3] (1-N)
33
+ if self.keypoint_mode == 'mediapipe':
34
+ self.register_buffer('key_points', torch.from_numpy(np.load("deep_3drecon/BFM/index_mp468_from_mesh35709.npy").astype(np.int64)))
35
+ unmatch_mask = self.key_points < 0
36
+ self.key_points[unmatch_mask] = 0
37
+ else:
38
+ self.register_buffer('key_points',torch.from_numpy(model['keypoints'].squeeze().astype(np.int_)).long()) # vertex indices of 68 facial landmarks. starts from 1. [68,1]
39
+
40
+
41
+ self.register_buffer('key_mean_shape',self.mean_shape.reshape([-1,3])[self.key_points,:])
42
+ self.register_buffer('key_id_base', self.id_base.reshape([-1,3,80])[self.key_points, :, :].reshape([-1,80]))
43
+ self.register_buffer('key_exp_base', self.exp_base.reshape([-1,3,64])[self.key_points, :, :].reshape([-1,64]))
44
+ self.key_id_base_np = self.key_id_base.cpu().numpy()
45
+ self.key_exp_base_np = self.key_exp_base.cpu().numpy()
46
+
47
+ self.register_buffer('persc_proj', torch.tensor(perspective_projection(focal=1015, center=112)))
48
+
+ def split_coeff(self, coeff):
49
+ """
50
+ coeff: Tensor[B, T, c=257] or [T, c=257]
51
+ """
52
+ ret_dict = {
53
+ 'identity': coeff[..., :80], # identity, [b, t, c=80]
54
+ 'expression': coeff[..., 80:144], # expression, [b, t, c=80]
55
+ 'texture': coeff[..., 144:224], # texture, [b, t, c=80]
56
+ 'euler': coeff[..., 224:227], # euler angles for pose, [b, t, c=3]
57
+ 'translation': coeff[..., 254:257], # translation, [b, t, c=3]
58
+ 'gamma': coeff[..., 227:254] # lighting, [b, t, c=27]
59
+ }
60
+ return ret_dict
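+ # the 257-dim coefficient layout is therefore: id 0:80, exp 80:144, tex 144:224,
+ # euler 224:227, gamma 227:254, trans 254:257 (gamma sits between euler and trans)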
61
+
62
+ def reconstruct_face_mesh(self, id_coeff, exp_coeff):
63
+ """
64
+ Generate a pose-independent 3D face mesh!
65
+ id_coeff: Tensor[T, c=80]
66
+ exp_coeff: Tensor[T, c=64]
67
+ """
68
+ id_coeff = id_coeff.to(self.key_id_base.device)
69
+ exp_coeff = exp_coeff.to(self.key_id_base.device)
70
+ mean_face = self.mean_shape.squeeze().reshape([1, -1]) # [3N, 1] ==> [1, 3N]
71
+ id_base, exp_base = self.id_base, self.exp_base # [3*N, C]
72
+ identity_diff_face = torch.matmul(id_coeff, id_base.transpose(0,1)) # [t,c],[c,3N] ==> [t,3N]
73
+ expression_diff_face = torch.matmul(exp_coeff, exp_base.transpose(0,1)) # [t,c],[c,3N] ==> [t,3N]
74
+
75
+ face = mean_face + identity_diff_face + expression_diff_face # [t,3N]
76
+ face = face.reshape([face.shape[0], -1, 3]) # [t,N,3]
77
+ # re-centering the face with mean_xyz, so the face will be in [-1, 1]
78
+ # mean_xyz = self.mean_shape.squeeze().reshape([-1,3]).mean(dim=0) # [1, 3]
79
+ # face_mesh = face - mean_xyz.unsqueeze(0) # [t,N,3]
80
+ return face
81
+
82
+ def reconstruct_cano_lm3d(self, id_coeff, exp_coeff):
83
+ """
84
+ Generate 3D landmark with keypoint base!
85
+ id_coeff: Tensor[T, c=80]
86
+ exp_coeff: Tensor[T, c=64]
87
+ """
88
+ id_coeff = id_coeff.to(self.key_id_base.device)
89
+ exp_coeff = exp_coeff.to(self.key_id_base.device)
90
+ mean_face = self.key_mean_shape.squeeze().reshape([1, -1]) # [3*68, 1] ==> [1, 3*68]
91
+ id_base, exp_base = self.key_id_base, self.key_exp_base # [3*68, C]
92
+ identity_diff_face = torch.matmul(id_coeff, id_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
93
+ expression_diff_face = torch.matmul(exp_coeff, exp_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
94
+
95
+ face = mean_face + identity_diff_face + expression_diff_face # [t,3N]
96
+ face = face.reshape([face.shape[0], -1, 3]) # [t,N,3]
97
+ # re-centering the face with mean_xyz, so the face will be in [-1, 1]
98
+ # mean_xyz = self.key_mean_shape.squeeze().reshape([-1,3]).mean(dim=0) # [1, 3]
99
+ # lm3d = face - mean_xyz.unsqueeze(0) # [t,N,3]
100
+ return face
101
+
102
+ def reconstruct_lm3d(self, id_coeff, exp_coeff, euler, trans, to_camera=True):
103
+ """
104
+ Generate 3D landmark with keypoint base!
105
+ id_coeff: Tensor[T, c=80]
106
+ exp_coeff: Tensor[T, c=64]
107
+ """
108
+ id_coeff = id_coeff.to(self.key_id_base.device)
109
+ exp_coeff = exp_coeff.to(self.key_id_base.device)
110
+ mean_face = self.key_mean_shape.squeeze().reshape([1, -1]) # [3*68, 1] ==> [1, 3*68]
111
+ id_base, exp_base = self.key_id_base, self.key_exp_base # [3*68, C]
112
+ identity_diff_face = torch.matmul(id_coeff, id_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
113
+ expression_diff_face = torch.matmul(exp_coeff, exp_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
114
+
115
+ face = mean_face + identity_diff_face + expression_diff_face # [t,3N]
116
+ face = face.reshape([face.shape[0], -1, 3]) # [t,N,3]
117
+ # re-centering the face with mean_xyz, so the face will be in [-1, 1]
118
+ rot = self.compute_rotation(euler)
119
+ # transform
120
+ lm3d = face @ rot + trans.unsqueeze(1) # [t, N, 3]
121
+ # to camera
122
+ if to_camera:
123
+ lm3d[...,-1] = 10 - lm3d[...,-1]
124
+ return lm3d
125
+
126
+ def reconstruct_lm2d_nerf(self, id_coeff, exp_coeff, euler, trans):
127
+ lm2d = self.reconstruct_lm2d(id_coeff, exp_coeff, euler, trans, to_camera=False)
128
+ lm2d[..., 0] = 1 - lm2d[..., 0]
129
+ lm2d[..., 1] = 1 - lm2d[..., 1]
130
+ return lm2d
131
+
132
+ def reconstruct_lm2d(self, id_coeff, exp_coeff, euler, trans, to_camera=True):
133
+ """
134
+ Generate 3D landmark with keypoint base!
135
+ id_coeff: Tensor[T, c=80]
136
+ exp_coeff: Tensor[T, c=64]
137
+ """
138
+ is_btc_flag = id_coeff.ndim == 3
139
+ if is_btc_flag:
140
+ b,t,_ = id_coeff.shape
141
+ id_coeff = id_coeff.reshape([b*t,-1])
142
+ exp_coeff = exp_coeff.reshape([b*t,-1])
143
+ euler = euler.reshape([b*t,-1])
144
+ trans = trans.reshape([b*t,-1])
145
+ id_coeff = id_coeff.to(self.key_id_base.device)
146
+ exp_coeff = exp_coeff.to(self.key_id_base.device)
147
+ mean_face = self.key_mean_shape.squeeze().reshape([1, -1]) # [3*68, 1] ==> [1, 3*68]
148
+ id_base, exp_base = self.key_id_base, self.key_exp_base # [3*68, C]
149
+ identity_diff_face = torch.matmul(id_coeff, id_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
150
+ expression_diff_face = torch.matmul(exp_coeff, exp_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
151
+
152
+ face = mean_face + identity_diff_face + expression_diff_face # [t,3N]
153
+ face = face.reshape([face.shape[0], -1, 3]) # [t,N,3]
154
+ # re-centering the face with mean_xyz, so the face will be in [-1, 1]
155
+ rot = self.compute_rotation(euler)
156
+ # transform
157
+ lm3d = face @ rot + trans.unsqueeze(1) # [t, N, 3]
158
+ # to camera
159
+ if to_camera:
160
+ lm3d[...,-1] = 10 - lm3d[...,-1]
161
+ # to image_plane
162
+ lm3d = lm3d @ self.persc_proj
163
+ lm2d = lm3d[..., :2] / lm3d[..., 2:]
164
+ # flip
165
+ lm2d[..., 1] = 224 - lm2d[..., 1]
166
+ lm2d /= 224
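+ # lm2d is now normalized to [0, 1] in image space (the BFM camera plane is 224x224)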
167
+ if is_btc_flag:
168
+ return lm2d.reshape([b,t,-1,2])
169
+ return lm2d
170
+
171
+ def compute_rotation(self, euler):
172
+ """
173
+ Return:
174
+ rot -- torch.tensor, size (B, 3, 3) pts @ trans_mat
175
+
176
+ Parameters:
177
+ euler -- torch.tensor, size (B, 3), radian
178
+ """
179
+
180
+ batch_size = euler.shape[0]
181
+ euler = euler.to(self.key_id_base.device)
182
+ ones = torch.ones([batch_size, 1]).to(self.key_id_base.device)
183
+ zeros = torch.zeros([batch_size, 1]).to(self.key_id_base.device)
184
+ x, y, z = euler[:, :1], euler[:, 1:2], euler[:, 2:],
185
+
186
+ rot_x = torch.cat([
187
+ ones, zeros, zeros,
188
+ zeros, torch.cos(x), -torch.sin(x),
189
+ zeros, torch.sin(x), torch.cos(x)
190
+ ], dim=1).reshape([batch_size, 3, 3])
191
+
192
+ rot_y = torch.cat([
193
+ torch.cos(y), zeros, torch.sin(y),
194
+ zeros, ones, zeros,
195
+ -torch.sin(y), zeros, torch.cos(y)
196
+ ], dim=1).reshape([batch_size, 3, 3])
197
+
198
+ rot_z = torch.cat([
199
+ torch.cos(z), -torch.sin(z), zeros,
200
+ torch.sin(z), torch.cos(z), zeros,
201
+ zeros, zeros, ones
202
+ ], dim=1).reshape([batch_size, 3, 3])
203
+
204
+ rot = rot_z @ rot_y @ rot_x
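+ # the permute below returns the transpose of R = Rz @ Ry @ Rx, so callers
+ # right-multiply row-vector points as pts @ rot (see the docstring above)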
205
+ return rot.permute(0, 2, 1)
206
+
207
+ def reconstruct_idexp_lm3d(self, id_coeff, exp_coeff):
208
+ """
209
+ Generate 3D landmark with keypoint base!
210
+ id_coeff: Tensor[T, c=80]
211
+ exp_coeff: Tensor[T, c=64]
212
+ """
213
+ id_coeff = id_coeff.to(self.key_id_base.device)
214
+ exp_coeff = exp_coeff.to(self.key_id_base.device)
215
+ id_base, exp_base = self.key_id_base, self.key_exp_base # [3*68, C]
216
+ identity_diff_face = torch.matmul(id_coeff, id_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
217
+ expression_diff_face = torch.matmul(exp_coeff, exp_base.transpose(0,1)) # [t,c],[c,3*68] ==> [t,3*68]
218
+
219
+ face = identity_diff_face + expression_diff_face # [t,3N]
220
+ face = face.reshape([face.shape[0], -1, 3]) # [t,N,3]
221
+ lm3d = face * 10
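+ # the x10 scaling matches the consumers of idexp_lm3d, which divide by 10
+ # before adding key_mean_shape back (see the debug branch in fit_3dmm_landmark.py)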
222
+ return lm3d
223
+
224
+ def reconstruct_idexp_lm3d_np(self, id_coeff, exp_coeff):
225
+ """
226
+ Generate 3D landmark with keypoint base!
227
+ id_coeff: Tensor[T, c=80]
228
+ exp_coeff: Tensor[T, c=64]
229
+ """
230
+ id_base, exp_base = self.key_id_base_np, self.key_exp_base_np # [3*68, C]
231
+ identity_diff_face = np.dot(id_coeff, id_base.T) # [t,c],[c,3*68] ==> [t,3*68]
232
+ expression_diff_face = np.dot(exp_coeff, exp_base.T) # [t,c],[c,3*68] ==> [t,3*68]
233
+
234
+ face = identity_diff_face + expression_diff_face # [t,3N]
235
+ face = face.reshape([face.shape[0], -1, 3]) # [t,N,3]
236
+ lm3d = face * 10
237
+ return lm3d
238
+
239
+ def get_eye_mouth_lm_from_lm3d(self, lm3d):
240
+ eye_lm = lm3d[:, 17:48] # [T, 31, 3]
241
+ mouth_lm = lm3d[:, 48:68] # [T, 20, 3]
242
+ return eye_lm, mouth_lm
243
+
244
+ def get_eye_mouth_lm_from_lm3d_batch(self, lm3d):
245
+ eye_lm = lm3d[:, :, 17:48] # [T, 31, 3]
246
+ mouth_lm = lm3d[:, :, 48:68] # [T, 20, 3]
247
+ return eye_lm, mouth_lm
248
+
249
+ def close_mouth_for_idexp_lm3d(self, idexp_lm3d, freeze_as_first_frame=True):
250
+ idexp_lm3d = idexp_lm3d.reshape([-1, 68,3])
251
+ num_frames = idexp_lm3d.shape[0]
252
+ eps = 0.0
253
+ # [n_landmarks=68, xyz=3]; x is left-right, y is up-down, z is depth
254
+ idexp_lm3d[:,49:54, 1] = (idexp_lm3d[:,49:54, 1] + idexp_lm3d[:,range(59,54,-1), 1])/2 + eps * 2
255
+ idexp_lm3d[:,range(59,54,-1), 1] = (idexp_lm3d[:,49:54, 1] + idexp_lm3d[:,range(59,54,-1), 1])/2 - eps * 2
256
+
257
+ idexp_lm3d[:,61:64, 1] = (idexp_lm3d[:,61:64, 1] + idexp_lm3d[:,range(67,64,-1), 1])/2 + eps
258
+ idexp_lm3d[:,range(67,64,-1), 1] = (idexp_lm3d[:,61:64, 1] + idexp_lm3d[:,range(67,64,-1), 1])/2 - eps
259
+
260
+ idexp_lm3d[:,49:54, 1] += (0.03 - idexp_lm3d[:,49:54, 1].mean(dim=1) + idexp_lm3d[:,61:64, 1].mean(dim=1)).unsqueeze(1).repeat([1,5])
261
+ idexp_lm3d[:,range(59,54,-1), 1] += (-0.03 - idexp_lm3d[:,range(59,54,-1), 1].mean(dim=1) + idexp_lm3d[:,range(67,64,-1), 1].mean(dim=1)).unsqueeze(1).repeat([1,5])
262
+
263
+ if freeze_as_first_frame:
264
+ idexp_lm3d[:, 48:68,] = idexp_lm3d[0, 48:68].unsqueeze(0).clone().repeat([num_frames, 1,1])*0
265
+ return idexp_lm3d.cpu()
266
+
267
+ def close_eyes_for_idexp_lm3d(self, idexp_lm3d):
268
+ idexp_lm3d = idexp_lm3d.reshape([-1, 68,3])
269
+ eps = 0.003
270
+ idexp_lm3d[:,37:39, 1] = (idexp_lm3d[:,37:39, 1] + idexp_lm3d[:,range(41,39,-1), 1])/2 + eps
271
+ idexp_lm3d[:,range(41,39,-1), 1] = (idexp_lm3d[:,37:39, 1] + idexp_lm3d[:,range(41,39,-1), 1])/2 - eps
272
+
273
+ idexp_lm3d[:,43:45, 1] = (idexp_lm3d[:,43:45, 1] + idexp_lm3d[:,range(47,45,-1), 1])/2 + eps
274
+ idexp_lm3d[:,range(47,45,-1), 1] = (idexp_lm3d[:,43:45, 1] + idexp_lm3d[:,range(47,45,-1), 1])/2 - eps
275
+
276
+ return idexp_lm3d
277
+
278
+ if __name__ == '__main__':
279
+ import cv2
280
+
281
+ font = cv2.FONT_HERSHEY_SIMPLEX
282
+
283
+ face_mesh_helper = Face3DHelper('deep_3drecon/BFM')
284
+ coeff_npy = 'data/coeff_fit_mp/crop_nana_003_coeff_fit_mp.npy'
285
+ coeff_dict = np.load(coeff_npy, allow_pickle=True).tolist()
286
+ lm3d = face_mesh_helper.reconstruct_lm2d(torch.tensor(coeff_dict['id']).cuda(), torch.tensor(coeff_dict['exp']).cuda(), torch.tensor(coeff_dict['euler']).cuda(), torch.tensor(coeff_dict['trans']).cuda() )
287
+
288
+ WH = 512
289
+ lm3d = (lm3d * WH).cpu().int().numpy()
290
+ eye_idx = list(range(36,48))
291
+ mouth_idx = list(range(48,68))
292
+ import imageio
293
+ debug_name = 'debug_lm3d.mp4'
294
+ writer = imageio.get_writer(debug_name, fps=25)
295
+ for i_img in range(len(lm3d)):
296
+ lm2d = lm3d[i_img ,:, :2] # [68, 2]
297
+ img = np.ones([WH, WH, 3], dtype=np.uint8) * 255
298
+ for i in range(len(lm2d)):
299
+ x, y = lm2d[i]
300
+ if i in eye_idx:
301
+ color = (0,0,255)
302
+ elif i in mouth_idx:
303
+ color = (0,255,0)
304
+ else:
305
+ color = (255,0,0)
306
+ img = cv2.circle(img, center=(x,y), radius=3, color=color, thickness=-1)
307
+ img = cv2.putText(img, f"{i}", org=(x,y), fontFace=font, fontScale=0.3, color=(255,0,0))
308
+ writer.append_data(img)
309
+ writer.close()
deep_3drecon/BFM/.gitkeep ADDED
File without changes
deep_3drecon/BFM/basel_53201.txt ADDED
The diff for this file is too large to render. See raw diff
 
deep_3drecon/BFM/index_mp468_from_mesh35709_v1.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d238a90df0c55075c9cea43dab76348421379a75c204931e34dbd2c11fb4b65
3
+ size 3872