Upload 82 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +13 -0
- src/pixel3dmm/preprocessing/MICA/.gitignore +71 -0
- src/pixel3dmm/preprocessing/MICA/LICENSE +88 -0
- src/pixel3dmm/preprocessing/MICA/README.md +72 -0
- src/pixel3dmm/preprocessing/MICA/condor.sh +44 -0
- src/pixel3dmm/preprocessing/MICA/condor/config.sub +16 -0
- src/pixel3dmm/preprocessing/MICA/condor/dataset.sh +21 -0
- src/pixel3dmm/preprocessing/MICA/condor/job.sh +22 -0
- src/pixel3dmm/preprocessing/MICA/configs/__init__.py +0 -0
- src/pixel3dmm/preprocessing/MICA/configs/config.py +128 -0
- src/pixel3dmm/preprocessing/MICA/configs/mica.yml +30 -0
- src/pixel3dmm/preprocessing/MICA/data/FLAME2020/FLAME_masks/FLAME_masks.gif +3 -0
- src/pixel3dmm/preprocessing/MICA/data/FLAME2020/FLAME_masks/FLAME_masks.pkl +3 -0
- src/pixel3dmm/preprocessing/MICA/data/FLAME2020/FLAME_masks/readme +2 -0
- src/pixel3dmm/preprocessing/MICA/data/FLAME2020/head_template.obj +0 -0
- src/pixel3dmm/preprocessing/MICA/data/FLAME2020/landmark_embedding.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/README.md +128 -0
- src/pixel3dmm/preprocessing/MICA/datasets/__init__.py +55 -0
- src/pixel3dmm/preprocessing/MICA/datasets/base.py +125 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/__init__.py +0 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/generator.py +98 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/__init__.py +0 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/bu3dfe.py +71 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/d3dfacs.py +53 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/facewarehouse.py +50 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/florence.py +59 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/frgc.py +84 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/instance.py +127 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/lyhm.py +78 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/pb4d.py +71 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/stirling.py +85 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/main.py +39 -0
- src/pixel3dmm/preprocessing/MICA/datasets/creation/util.py +146 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/BP4D.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/BU3DFE.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/D3DFACS.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/FACEWAREHOUSE.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/FLORENCE.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/FRGC.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/LYHM.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/datasets/image_paths/STIRLING.npy +3 -0
- src/pixel3dmm/preprocessing/MICA/demo.py +156 -0
- src/pixel3dmm/preprocessing/MICA/demo/input/carell.jpg +0 -0
- src/pixel3dmm/preprocessing/MICA/demo/input/connelly.jpg +3 -0
- src/pixel3dmm/preprocessing/MICA/demo/input/justin.png +3 -0
- src/pixel3dmm/preprocessing/MICA/demo/input/lawrence.jpg +3 -0
- src/pixel3dmm/preprocessing/MICA/documents/BP4D.gif +3 -0
- src/pixel3dmm/preprocessing/MICA/documents/D3DFACS.gif +3 -0
- src/pixel3dmm/preprocessing/MICA/documents/FACEWAREHOUSE.gif +3 -0
- src/pixel3dmm/preprocessing/MICA/documents/FLORENCE.gif +3 -0
.gitattributes
CHANGED
@@ -46,3 +46,16 @@ src/pixel3dmm/preprocessing/facer/samples/data/weirdface.jpg filter=lfs diff=lfs
 src/pixel3dmm/preprocessing/facer/samples/example_output/alignment.png filter=lfs diff=lfs merge=lfs -text
 src/pixel3dmm/preprocessing/facer/samples/example_output/detect.png filter=lfs diff=lfs merge=lfs -text
 src/pixel3dmm/preprocessing/facer/samples/example_output/parsing.png filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/data/FLAME2020/FLAME_masks/FLAME_masks.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/demo/input/connelly.jpg filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/demo/input/justin.png filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/demo/input/lawrence.jpg filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/BP4D.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/D3DFACS.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/FACEWAREHOUSE.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/FLORENCE.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/FRGC.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/LYHM.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/STIRLING.gif filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/teaser.jpg filter=lfs diff=lfs merge=lfs -text
+src/pixel3dmm/preprocessing/MICA/documents/voxceleb.gif filter=lfs diff=lfs merge=lfs -text
src/pixel3dmm/preprocessing/MICA/.gitignore
ADDED
@@ -0,0 +1,71 @@
# Compiled source #
###################
.idea

datasets/creation/template/*
statistics/*
*.o
*.so

# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip

# OS generated files #
######################
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# 3D data #
############
*.mat
*.obj
*.dat
*.npz
*.pkl

# python file #
############
*.pyc
__pycache__

## deca data
data/FLAME2020/generic_model.pkl
data/FLAME2020/female_model.pkl
data/FLAME2020/male_model.pkl
data/FLAME2020/FLAME_albedo_from_BFM.npz
results
output
TestSamples

## dump files
__dump

## visual code files
.vscode
render_dataset.py
shapes.pt
partial
images
*.pt
testing/now/jobs
testing/now/logs
testing/stirling/logs
testing/stirling/jobs

demo/arcface
demo/output
src/pixel3dmm/preprocessing/MICA/LICENSE
ADDED
@@ -0,0 +1,88 @@
License

Software Copyright License for non-commercial scientific research purposes
Please read carefully the following terms and conditions and any accompanying documentation before you download
and/or use the MICA model, data and software, (the "Model & Software"), including 3D meshes, software, and scripts.
By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use
of this github repository), you acknowledge that you have read these terms and conditions, understand them, and
agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use
the Model & Software. Any infringement of the terms of this agreement will automatically terminate your rights
under this License

Ownership / Licensees
The Model & Software and the associated materials has been developed at the
Max Planck Institute for Intelligent Systems (hereinafter "MPI").

Any copyright or patent right is owned by and proprietary material of the
Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter
collectively “Max-Planck”) hereinafter the “Licensor”.

License Grant
Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right:

• To install the Model & Software on computers owned, leased or otherwise controlled by you and/or your organization.
• To use the Model & Software for the sole purpose of performing peaceful non-commercial scientific research,
non-commercial education, or non-commercial artistic projects.

Any other use, in particular any use for commercial, pornographic, military, or surveillance purposes is prohibited.
This includes, without limitation, incorporation in a commercial product, use in a commercial service,
or production of other artefacts for commercial purposes.

The Model & Software may not be used to create fake, libelous, misleading, or defamatory content of any kind, excluding
analyses in peer-reviewed scientific research.

The Model & Software may not be reproduced, modified and/or made available in any form to any third party
without Max-Planck’s prior written permission.

The Model & Software may not be used for pornographic purposes or to generate pornographic material whether
commercial or not. This license also prohibits the use of the Model & Software to train methods/algorithms/neural
networks/etc. for commercial use of any kind. By downloading the Model & Software, you agree not to reverse engineer it.

No Distribution
The Model & Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered
for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive
purposes only.

Disclaimer of Representations and Warranties
You expressly acknowledge and agree that the Model & Software results from basic research, is provided “AS IS”,
may contain errors, and that any use of the Model & Software is at your sole risk.
LICENSOR MAKES NO REPRESENTATIONS
OR WARRANTIES OF ANY KIND CONCERNING THE MODEL & SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY
LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. Specifically, and not to limit the foregoing, licensor
makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of
the Model & Software, (ii) that the use of the Model & Software will not infringe any patents, copyrights or other
intellectual property rights of a third party, and (iii) that the use of the Model & Software will not cause any
damage of any kind to you or a third party.

Limitation of Liability
Because this Model & Software License Agreement qualifies as a donation, according to Section 521 of the German
Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only.
If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee
for the resulting damage.

Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have
arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be
liable in accordance with the German Product Liability Act in the event of product liability. The foregoing
applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall
be excluded. Patent claims generated through the usage of the Model & Software cannot be directed towards the copyright holders.
The Model & Software is provided in the state of development the licensor defines. If modified or extended by
Licensee, the Licensor makes no claims about the fitness of the Model & Software and is not responsible
for any problems such modifications cause.

No Maintenance Services
You understand and agree that Licensor is under no obligation to provide either maintenance services,
update services, notices of latent defects, or corrections of defects with regard to the Model & Software.
Licensor nevertheless reserves the right to update, modify, or discontinue the Model & Software at any time.

Defects of the Model & Software must be notified in writing to the Licensor with a comprehensible description
of the error symptoms. The notification of the defect should enable the reproduction of the error.
The Licensee is encouraged to communicate any use, results, modification or publication.

Publications using the Model & Software
You acknowledge that the Model & Software is a valuable scientific resource and agree to appropriately reference
the following paper in any publication making use of the Model & Software.

Commercial licensing opportunities
For commercial uses of the Model & Software, please send email to [email protected]

This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention.
src/pixel3dmm/preprocessing/MICA/README.md
ADDED
@@ -0,0 +1,72 @@
<h2 align="center"><b>MICA - Towards Metrical Reconstruction of Human Faces</b></h2>

<h4 align="center"><b><a href="https://zielon.github.io/" target="_blank">Wojciech Zielonka</a>, <a href="https://sites.google.com/site/bolkartt/" target="_blank">Timo Bolkart</a>, <a href="https://justusthies.github.io/" target="_blank">Justus Thies</a></b></h4>

<h6 align="center"><i>Max Planck Institute for Intelligent Systems, Tübingen, Germany</i></h6>

<h4 align="center">
<a href="https://youtu.be/vzzEbvv08VA" target="_blank">Video </a>
<a href="https://arxiv.org/pdf/2204.06607.pdf" target="_blank">Paper </a>
<a href="https://zielon.github.io/mica/" target="_blank">Project Website </a>
<a href="https://github.com/Zielon/metrical-tracker" target="_blank">Face Tracker </a>
<a href="https://github.com/Zielon/MICA/tree/master/datasets/" target="_blank"><b>Dataset </b></a>
<a href="https://keeper.mpdl.mpg.de/f/6b12c44378e64738b993/" target="_blank">Supplemental </a>
<a href="mailto:mica@tue.mpg.de">Email</a>
</h4>

<div align="center">
<img src="documents/teaser.jpg">
<i style="font-size: 1.05em;">Official Repository for ECCV 2022 paper Towards Metrical Reconstruction of Human Faces</i>
</div>
<br>

<div align="center">
⚠ The face tracker is now available under <a href="https://github.com/Zielon/metrical-tracker" target="_blank">Metrical Photometric Tracker </a> ⚠
</div>

### Installation

After cloning the repository, please install the environment using the attached conda `environment.yml` file with the command
``conda env create -f environment.yml``. Additionally, the FLAME2020 model is needed. To obtain it, create an account at the [website](https://flame.is.tue.mpg.de/), download the model, and place it in the `/data/pretrained/FLAME2020/` folder.

You can also simply run the `install.sh` script:

```shell
git clone https://github.com/Zielon/MICA.git
cd MICA
./install.sh
```
You will be asked to provide `{flame_user}` and `{flame_password}` for your FLAME account in order to access the file server.

### Pre-trained Models

If you decide not to use the installation script, the pretrained model can be found under the [link](https://drive.google.com/file/d/1bYsI_spptzyuFmfLYqYkcJA6GZWZViNt/view?usp=sharing). After downloading, please place it at `/data/pretrained/mica.tar`. Additionally, you will need to provide models for `insightface`:
1) [antelopev2](https://drive.google.com/file/d/16PWKI_RjjbE4_kqpElG-YFqe8FpXjads/view?usp=sharing)
2) [buffalo_l](https://drive.google.com/file/d/1navJMy0DTr1_DHjLWu1i48owCPvXWfYc/view?usp=sharing)

Then unzip them and place them in `~/.insightface/models/`. The `install.sh` script does this for you.

### How To Use

To use MICA you can simply run the `demo.py` file. It will process all the images from the `demo/input/` folder and create an output destination for each subject with a `.ply` mesh, a rendered image, and `.npy` FLAME parameters.

### Dataset and Training

The MICA dataset consists of eight smaller datasets for about 2300 subjects under a common FLAME topology. Read more about how to obtain and use it under the [link](https://github.com/Zielon/MICA/tree/master/datasets/). To train MICA, the images from all eight datasets are needed. The repository contains scripts showing how to generate the Arcface input images as well as the complete list of all images used for training. More information can be found [here](https://github.com/Zielon/MICA/tree/master/datasets).

When training from scratch, for Arcface model initialization please download [Glint360K](https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch) and specify the path to it in the config as `cfg.model.arcface_pretrained_model`.

### Testing

The testing was done using two datasets, [Stirling](http://pics.stir.ac.uk/ESRC/) and [NoW](https://now.is.tue.mpg.de/). In the [model folder](https://github.com/Zielon/MICA/tree/master/models) you can find the corresponding scripts to run the testing routine, which generates the meshes. To calculate the NoW challenge error you can use the following [repository](https://github.com/soubhiksanyal/now_evaluation).

### Citation
If you use this project in your research please cite MICA:
```bibtex
@proceedings{zielonka22mica,
  author = {Zielonka, Wojciech and Bolkart, Timo and Thies, Justus},
  title = {Towards Metrical Reconstruction of Human Faces},
  journal = {European Conference on Computer Vision},
  year = {2022}
}
```
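The installation notes above assume several downloaded assets are already in place before `demo.py` will run. A minimal sketch for checking them, assuming the paths named in the README and a checkout rooted at the current directory (the helper itself is illustrative, not part of the repository):

```python
# Sketch: verify the assets the MICA README asks for before running demo.py.
from pathlib import Path

REQUIRED = [
    'data/pretrained/mica.tar',            # pretrained MICA checkpoint
    'data/FLAME2020/generic_model.pkl',    # FLAME2020 model (requires a FLAME account)
    'data/FLAME2020/head_template.obj',
    'data/FLAME2020/landmark_embedding.npy',
]

def check_assets(repo_root='.'):
    missing = [p for p in REQUIRED if not (Path(repo_root) / p).exists()]
    insightface_dir = Path.home() / '.insightface' / 'models' / 'antelopev2'
    if not insightface_dir.exists():
        missing.append(str(insightface_dir))
    return missing

if __name__ == '__main__':
    print(check_assets() or 'All MICA assets found.')
```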
src/pixel3dmm/preprocessing/MICA/condor.sh
ADDED
@@ -0,0 +1,44 @@
#!/bin/sh

# bash condor.sh 100 ./configs/mica.yml 1

# default parameters
BID=3
CONFIG=./configs/mica.yml
NODE_CONFIG=condor/config.sub
NODE_SCRIPT=./condor/job.sh
GPUS=1
GPU_TYPE=0

# set parameters
if [ -n "$1" ]; then BID=${1}; fi
if [ -n "$2" ]; then CONFIG=${2}; fi
if [ -n "$3" ]; then GPU_TYPE=${3}; fi
if [ -n "$4" ]; then GPUS=${4}; fi
if [ -n "$5" ]; then NODE_CONFIG=${5}; fi
if [ -n "$6" ]; then NODE_SCRIPT=${6}; fi

mkdir -p output/condor_logs
cp -nf ${NODE_CONFIG}{,.bak}

GPU_NAME=Error

if [ $GPU_TYPE -eq 0 ]; then GPU_NAME='Quadro RTX 6000'; fi
if [ $GPU_TYPE -eq 1 ]; then GPU_NAME='Tesla V100-SXM2-32GB'; fi
if [ $GPU_TYPE -eq 2 ]; then GPU_NAME='NVIDIA GeForce RTX 2080 Ti'; fi

NAME=$(basename ${CONFIG} .yml)
sed -i "s/{errorfile}/${NAME}/" ${NODE_CONFIG}.bak
sed -i "s/{outfile}/${NAME}/" ${NODE_CONFIG}.bak
sed -i "s/{logfile}/${NAME}/" ${NODE_CONFIG}.bak
sed -i "s/{gpus}/${GPUS}/" ${NODE_CONFIG}.bak
sed -i "s/{gpu_name}/${GPU_NAME}/" ${NODE_CONFIG}.bak

# start node and execute script
echo 'Executing:' ${NODE_SCRIPT} ${CONFIG}
echo '# BID:' ${BID}
echo '# GPUS:' ${GPUS}
echo '# GPU NAME:' ${GPU_NAME}

condor_submit_bid ${BID} ${NODE_CONFIG}.bak -append "arguments = ${NODE_SCRIPT} ${CONFIG}"
rm ${NODE_CONFIG}.bak
src/pixel3dmm/preprocessing/MICA/condor/config.sub
ADDED
@@ -0,0 +1,16 @@
executable = /bin/bash
error = ./output/condor_logs/{errorfile}_$(ClusterId).$(ProcId).err
output = ./output/condor_logs/{outfile}_$(ClusterId).$(ProcId).out
log = ./output/condor_logs/{logfile}_$(ClusterId).$(ProcId).log
request_memory = 32768
request_cpus = 6
request_gpus = {gpus}
+WantGPUStats = true
requirements = (TARGET.CUDADeviceName=="{gpu_name}")
# EXIT SETTINGS
on_exit_hold = (ExitCode =?= 3)
on_exit_hold_reason = "Checkpointed, will resume"
on_exit_hold_subcode = 2
periodic_release = ( (JobStatus =?= 5) && (HoldReasonCode =?= 3) && (HoldReasonSubCode =?= 2) )
+RunningPriceExceededAction = "kill"
queue
src/pixel3dmm/preprocessing/MICA/condor/dataset.sh
ADDED
@@ -0,0 +1,21 @@
#!/bin/sh

CONFIG=${1}

PYTHON_ENV=/home/wzielonka/miniconda3/etc/profile.d/conda.sh
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
export PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:$PATH
export PYTHONPATH="${PYTHONPATH}:/home/wzielonka/projects/OnFlame-internal/"

echo 'START JOB (dataset generation)'

module load cuda/10.1
module load gcc/4.9

echo 'ACTIVATE MICA'
source ${PYTHON_ENV}
conda activate MICA

echo 'RUN SCRIPT'
cd ${SCRIPT_DIR}/../datasets/creation
python ./main.py
src/pixel3dmm/preprocessing/MICA/condor/job.sh
ADDED
@@ -0,0 +1,22 @@
#!/bin/sh

CONFIG=${1}

PYTHON_ENV=/home/wzielonka/miniconda3/etc/profile.d/conda.sh
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
export PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:$PATH

echo 'START JOB (MICA training)'

module load cuda/10.1
module load gcc/4.9

echo 'ACTIVATE MICA'
source ${PYTHON_ENV}
conda activate MICA

echo 'RUN SCRIPT'
#echo 'ScriptDir' ${SCRIPT_DIR}
echo 'CONFIG: ' ${CONFIG}
cd ${SCRIPT_DIR}/..
python ./train.py --cfg ${CONFIG}
src/pixel3dmm/preprocessing/MICA/configs/__init__.py
ADDED
File without changes
src/pixel3dmm/preprocessing/MICA/configs/config.py
ADDED
@@ -0,0 +1,128 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import argparse
import os

from yacs.config import CfgNode as CN

cfg = CN()

abs_mica_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
cfg.mica_dir = abs_mica_dir
cfg.device = 'cuda'
cfg.device_id = '0'
cfg.pretrained_model_path = os.path.join(cfg.mica_dir, 'data/pretrained', 'mica.tar')
cfg.output_dir = ''

# ---------------------------------------------------------------------------- #
# Options for Face model
# ---------------------------------------------------------------------------- #
cfg.model = CN()
cfg.model.testing = False
cfg.model.name = 'mica'

cfg.model.topology_path = os.path.join(cfg.mica_dir, 'data/FLAME2020', 'head_template.obj')
cfg.model.flame_model_path = os.path.join(cfg.mica_dir, 'data/FLAME2020', 'generic_model.pkl')
cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, 'data/FLAME2020', 'landmark_embedding.npy')
cfg.model.n_shape = 300
cfg.model.layers = 8
cfg.model.hidden_layers_size = 256
cfg.model.mapping_layers = 3
cfg.model.use_pretrained = True
cfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'

# ---------------------------------------------------------------------------- #
# Options for Dataset
# ---------------------------------------------------------------------------- #
cfg.dataset = CN()
cfg.dataset.training_data = ['LYHM']
cfg.dataset.eval_data = ['FLORENCE']
cfg.dataset.batch_size = 2
cfg.dataset.K = 4
cfg.dataset.n_train = 100000
cfg.dataset.num_workers = 4
cfg.dataset.root = '/datasets/MICA/'

# ---------------------------------------------------------------------------- #
# Mask weights
# ---------------------------------------------------------------------------- #
cfg.mask_weights = CN()
cfg.mask_weights.face = 150.0
cfg.mask_weights.nose = 50.0
cfg.mask_weights.lips = 50.0
cfg.mask_weights.forehead = 50.0
cfg.mask_weights.lr_eye_region = 50.0
cfg.mask_weights.eye_region = 50.0

cfg.mask_weights.whole = 1.0
cfg.mask_weights.ears = 0.01
cfg.mask_weights.eyes = 0.01

cfg.running_average = 7

# ---------------------------------------------------------------------------- #
# Options for training
# ---------------------------------------------------------------------------- #
cfg.train = CN()
cfg.train.use_mask = False
cfg.train.max_epochs = 50
cfg.train.max_steps = 100000
cfg.train.lr = 1e-4
cfg.train.arcface_lr = 1e-3
cfg.train.weight_decay = 0.0
cfg.train.lr_update_step = 100000000
cfg.train.log_dir = 'logs'
cfg.train.log_steps = 10
cfg.train.vis_dir = 'train_images'
cfg.train.vis_steps = 200
cfg.train.write_summary = True
cfg.train.checkpoint_steps = 1000
cfg.train.checkpoint_epochs_steps = 2
cfg.train.val_steps = 1000
cfg.train.val_vis_dir = 'val_images'
cfg.train.eval_steps = 5000
cfg.train.reset_optimizer = False
cfg.train.val_save_img = 5000
cfg.test_dataset = 'now'


def get_cfg_defaults():
    return cfg.clone()


def update_cfg(cfg, cfg_file):
    cfg.merge_from_file(cfg_file)
    return cfg.clone()


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, help='cfg file path', required=True)
    parser.add_argument('--test_dataset', type=str, help='Test dataset type', default='')
    parser.add_argument('--checkpoint', type=str, help='Checkpoint to load', default='')

    args = parser.parse_args()
    print(args, end='\n\n')

    cfg = get_cfg_defaults()
    if args.cfg is not None:
        cfg_file = args.cfg
        cfg = update_cfg(cfg, args.cfg)
        cfg.cfg_file = cfg_file

    return cfg, args
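Since the defaults above are a yacs `CfgNode`, overriding them from a YAML file is a single merge. A minimal sketch using the helpers defined in this file, assuming the repository root is the working directory and `configs/mica.yml` is the override:

```python
# Sketch: load the default MICA config and override it with configs/mica.yml.
from configs.config import get_cfg_defaults, update_cfg

cfg = get_cfg_defaults()                    # clone of the module-level defaults
cfg = update_cfg(cfg, 'configs/mica.yml')   # merge_from_file + clone

print(cfg.dataset.training_data)            # overridden list of training subsets
print(cfg.train.lr, cfg.model.n_shape)
```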
src/pixel3dmm/preprocessing/MICA/configs/mica.yml
ADDED
@@ -0,0 +1,30 @@
# Mica config

pretrained_model_path: ''

dataset:
  root: '/scratch/NFC/MICA/dataset/'
  training_data: [ 'LYHM', 'D3DFACS', 'BU3DFE', 'FRGC', 'Stirling', 'FaceWarehouse', 'BP4D' ]
  eval_data: [ 'FLORENCE' ]
  num_workers: 4
  batch_size: 8
  K: 2

train:
  lr: 1e-5
  arcface_lr: 1e-5
  weight_decay: 2e-4
  use_mask: True
  reset_optimizer: False
  max_steps: 160000
  log_steps: 50
  val_steps: 300
  vis_steps: 1200
  val_save_img: 1200
  checkpoint_steps: 1000
  checkpoint_epochs_steps: 10000

model:
  use_pretrained: False
  n_shape: 300
  name: 'mica'
src/pixel3dmm/preprocessing/MICA/data/FLAME2020/FLAME_masks/FLAME_masks.gif
ADDED
(binary GIF tracked with Git LFS; preview omitted)
src/pixel3dmm/preprocessing/MICA/data/FLAME2020/FLAME_masks/FLAME_masks.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ccefbe1ac0774ff78c68caf2c627b4abc067a6555ebeb0be5d5b0812366ab492
size 215062
src/pixel3dmm/preprocessing/MICA/data/FLAME2020/FLAME_masks/readme
ADDED
@@ -0,0 +1,2 @@
Dictionary with vertex indices for different masks for the publicly available FLAME head model (https://flame.is.tue.mpg.de/).
See the gif for a visualization of all masks.
src/pixel3dmm/preprocessing/MICA/data/FLAME2020/head_template.obj
ADDED
The diff for this file is too large to render.
See raw diff
src/pixel3dmm/preprocessing/MICA/data/FLAME2020/landmark_embedding.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8095348eeafce5a02f6bd8765146307f9567a3f03b316d788a2e47336d667954
size 31292
src/pixel3dmm/preprocessing/MICA/datasets/README.md
ADDED
@@ -0,0 +1,128 @@
<h2 align="center"><b>MICA - Dataset</b></h2>

The <b>MICA</b> dataset consists of eight smaller datasets for about 2315 subjects, built by unifying existing small- and medium-scale datasets under a common FLAME topology. It consists of shape geometry only; therefore, to obtain images for each subject please refer to the primary dataset.

This dataset contains registration meshes together with the corresponding fitted FLAME parameters. Actors are split between individual folders with a unique identifier based on the original dataset. The folder name of the parameters and mesh is the same as the one in the analogous dataset with images.

To obtain the dataset please follow each link separately and request the given subset.

In the case of any questions feel free to <a href="mailto:mica@tue.mpg.de">email us</a>.

<div align="center">
<table cellspacing="0" cellpadding="0" bgcolor="#ffffff" border="0">
<tr>
<th>FLAME Renderings</th>
<th>Dataset</th>
<th>Subjects</th>
<th>Source</th>
</tr>
<tr></tr>
<tr>
<td><img src="../documents/STIRLING.gif"></td>
<td><b>Stirling</b></td>
<td>133</td>
<td><a href="http://pics.stir.ac.uk/ESRC/" target="_blank">pics.stir.ac.uk</a></td>
</tr>
<tr></tr>
<tr>
<td><img src="../documents/D3DFACS.gif"></td>
<td><b>D3DFACS</b></td>
<td>10</td>
<td><a href="https://www.cs.bath.ac.uk/~dpc/D3DFACS/" target="_blank">cs.bath.ac.uk</a></td>
</tr>
<tr></tr>
<tr>
<td><img src="../documents/FLORENCE.gif"></td>
<td><b>Florence 2D/3D</b></td>
<td>53</td>
<td><a href="https://www.micc.unifi.it/resources/datasets/florence-3d-faces/" target="_blank">micc.unifi.it</a></td>
</tr>
<tr></tr>
<tr>
<td><img src="../documents/LYHM.gif"></td>
<td><b>LYHM</b></td>
<td>1211</td>
<td><a href="https://www-users.york.ac.uk/~np7/research/Headspace/" target="_blank">cs.york.ac.uk</a></td>
</tr>
<tr></tr>
<tr>
<td><img src="../documents/FACEWAREHOUSE.gif"></td>
<td><b>FaceWarehouse</b></td>
<td>150</td>
<td><a href="http://kunzhou.net/zjugaps/facewarehouse/" target="_blank">kunzhou.net</a></td>
</tr>
<tr></tr>
<tr>
<td><img src="../documents/FRGC.gif"></td>
<td><b><a href="https://github.com/Zielon/MICA/tree/master/datasets/#frgc-subset">*FRGC</a></b></td>
<td>531</td>
<td><a href="https://cvrl.nd.edu/projects/data/#face-recognition-grand-challenge-frgc-v20-data-collection" target="_blank">cvrl.nd.edu</a></td>
</tr>
<tr></tr>
<tr>
<td><img src="../documents/BP4D.gif"></td>
<td><b><a href="https://github.com/Zielon/MICA/tree/master/datasets/#frgc-subset">*BP4D+</a></b></td>
<td>127</td>
<td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/3DFE_Analysis.html" target="_blank">cs.binghamton.edu</a></td>
</tr>
</table>
</div>

Each subset zip file has the following structure:
```shell
root\
    FLAME_parameters\
        actor_id\
            *.npz
    registrations\
        actor_id\
            *.obj
```

To retrieve FLAME2020 parameters you can simply do:
```python
import numpy as np
import torch

params = np.load('path.npz', allow_pickle=True)
pose = torch.tensor(params['pose']).float()
betas = torch.tensor(params['betas']).float()

flame = {
    'shape_params': betas[:300],
    'expression_params': betas[300:],
    'pose_params': torch.cat([pose[:3], pose[6:9]]),
}
```

### MICA Training Dataset Preparation

To prepare the MICA training dataset you can follow the scripts from the [creation](https://github.com/Zielon/MICA/tree/master/datasets/creation) folder. Additionally, the complete list of images used for training can be found in the [image_paths](https://github.com/Zielon/MICA/tree/master/datasets/image_paths) folder. It contains the name of the FLAME parameters file `.npz` and a list of all images used for training with their original names, stored as a dictionary.

### Licence
This dataset is for academic, non-commercial usage only. Moreover, it is an extension of already existing datasets; therefore, the license is shared and applies equivalently to both the original and the corresponding derived one. Please read the original license of each original dataset for more information, especially in the context of data privacy.

### Additional Information

Please note that some subsets do not have an identical number of subjects due to registration errors.

#### BP4D+ Subset

The original dataset contains 140 subjects, from which we selected scans in neutral pose and successfully registered 127 FLAME meshes.

#### FRGC Subset

1) A group desiring to obtain FLAME results on the FRGC data must license the FRGC 2.0 data set from CVRL using the licensing procedure at the website: http://cvrl.nd.edu .
2) Once the license form is completed and CVRL has approved it, an email will be sent from Globus indicating that you have been authorized to retrieve FRGC 2.0.
3) That authorization will serve as authorization to receive any derivative work; therefore, please send a confirmation to the mica [AT] tue.mpg.de address to receive the MICA dataset.

### Citation
If you use this dataset in your research please cite MICA:
```bibtex
@proceedings{MICA:ECCV2022,
  author = {Zielonka, Wojciech and Bolkart, Timo and Thies, Justus},
  title = {Towards Metrical Reconstruction of Human Faces},
  journal = {European Conference on Computer Vision},
  year = {2022}
}
```
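Given the `FLAME_parameters/actor_id/*.npz` layout described above, collecting every actor's shape code is a short loop. A minimal sketch, assuming a placeholder subset root on disk:

```python
# Sketch: walk a MICA subset (FLAME_parameters/<actor_id>/*.npz) and collect
# the per-actor shape codes. The `root` path is a placeholder.
from glob import glob
from pathlib import Path
import numpy as np

root = '/datasets/MICA/LYHM'
shapes = {}
for npz_path in sorted(glob(f'{root}/FLAME_parameters/*/*.npz')):
    actor = Path(npz_path).parent.name
    params = np.load(npz_path, allow_pickle=True)
    shapes[actor] = params['betas'][:300]   # first 300 entries are FLAME shape params
print(f'Loaded {len(shapes)} actors')
```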
src/pixel3dmm/preprocessing/MICA/datasets/__init__.py
ADDED
@@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import numpy as np
from torch.utils.data import ConcatDataset

from datasets.base import BaseDataset


def build_train(config, device):
    data_list = []
    total_images = 0
    for dataset in config.training_data:
        dataset_name = dataset.upper()
        config.n_train = np.Inf
        if type(dataset) is list:
            dataset_name, n_train = dataset
            config.n_train = n_train

        dataset = BaseDataset(name=dataset_name, config=config, device=device, isEval=False)
        data_list.append(dataset)
        total_images += dataset.total_images

    return ConcatDataset(data_list), total_images


def build_val(config, device):
    data_list = []
    total_images = 0
    for dataset in config.eval_data:
        dataset_name = dataset.upper()
        config.n_train = np.Inf
        if type(dataset) is list:
            dataset_name, n_train = dataset
            config.n_train = n_train

        dataset = BaseDataset(name=dataset_name, config=config, device=device, isEval=True)
        data_list.append(dataset)
        total_images += dataset.total_images

    return ConcatDataset(data_list), total_images
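`build_train` returns a plain `ConcatDataset`, so it can be fed to a standard PyTorch `DataLoader`. A minimal sketch, assuming the default config from `configs/config.py` and the subsets present under the configured dataset root:

```python
# Sketch: build the concatenated training set and wrap it in a DataLoader.
import torch
from torch.utils.data import DataLoader

from configs.config import get_cfg_defaults
from datasets import build_train

cfg = get_cfg_defaults()
device = torch.device(cfg.device)
train_set, total_images = build_train(cfg.dataset, device)

loader = DataLoader(train_set, batch_size=cfg.dataset.batch_size,
                    shuffle=True, num_workers=cfg.dataset.num_workers,
                    pin_memory=True, drop_last=True)
print(f'{total_images} images across {len(train_set)} actors')
```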
src/pixel3dmm/preprocessing/MICA/datasets/base.py
ADDED
@@ -0,0 +1,125 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import os
import re
from abc import ABC
from functools import reduce
from pathlib import Path

import loguru
import numpy as np
import torch
from loguru import logger
from skimage.io import imread
from torch.utils.data import Dataset
from torchvision import transforms


class BaseDataset(Dataset, ABC):
    def __init__(self, name, config, device, isEval):
        self.K = config.K
        self.isEval = isEval
        self.n_train = np.Inf
        self.imagepaths = []
        self.face_dict = {}
        self.name = name
        self.device = device
        self.min_max_K = 0
        self.cluster = False
        self.dataset_root = config.root
        self.total_images = 0
        self.image_folder = 'arcface_input'
        self.flame_folder = 'FLAME_parameters'
        self.initialize()

    def initialize(self):
        logger.info(f'[{self.name}] Initialization')
        image_list = f'{os.path.abspath(os.path.dirname(__file__))}/image_paths/{self.name}.npy'
        logger.info(f'[{self.name}] Load cached file list: ' + image_list)
        self.face_dict = np.load(image_list, allow_pickle=True).item()
        self.imagepaths = list(self.face_dict.keys())
        logger.info(f'[Dataset {self.name}] Total {len(self.imagepaths)} actors loaded!')
        self.set_smallest_k()

    def set_smallest_k(self):
        self.min_max_K = np.Inf
        max_min_k = -np.Inf
        for key in self.face_dict.keys():
            length = len(self.face_dict[key][0])
            if length < self.min_max_K:
                self.min_max_K = length
            if length > max_min_k:
                max_min_k = length

        self.total_images = reduce(lambda k, l: l + k, map(lambda e: len(self.face_dict[e][0]), self.imagepaths))
        loguru.logger.info(f'Dataset {self.name} with min K = {self.min_max_K} max K = {max_min_k} length = {len(self.face_dict)} total images = {self.total_images}')
        return self.min_max_K

    def compose_transforms(self, *args):
        self.transforms = transforms.Compose([t for t in args])

    def get_arcface_path(self, image_path):
        return re.sub('png|jpg', 'npy', str(image_path))

    def __len__(self):
        return len(self.imagepaths)

    def __getitem__(self, index):
        actor = self.imagepaths[index]
        images, params_path = self.face_dict[actor]
        images = [Path(self.dataset_root, self.name, self.image_folder, path) for path in images]
        sample_list = np.array(np.random.choice(range(len(images)), size=self.K, replace=False))

        K = self.K
        if self.isEval:
            K = max(0, min(200, self.min_max_K))
            sample_list = np.array(range(len(images))[:K])

        params = np.load(os.path.join(self.dataset_root, self.name, self.flame_folder, params_path), allow_pickle=True)
        pose = torch.tensor(params['pose']).float()
        betas = torch.tensor(params['betas']).float()

        flame = {
            'shape_params': torch.cat(K * [betas[:300][None]], dim=0),
            'expression_params': torch.cat(K * [betas[300:][None]], dim=0),
            'pose_params': torch.cat(K * [torch.cat([pose[:3], pose[6:9]])[None]], dim=0),
        }

        images_list = []
        arcface_list = []

        for i in sample_list:
            image_path = images[i]
            image = np.array(imread(image_path))
            image = image / 255.
            image = image.transpose(2, 0, 1)
            arcface_image = np.load(self.get_arcface_path(image_path), allow_pickle=True)

            images_list.append(image)
            arcface_list.append(torch.tensor(arcface_image))

        images_array = torch.from_numpy(np.array(images_list)).float()
        arcface_array = torch.stack(arcface_list).float()

        return {
            'image': images_array,
            'arcface': arcface_array,
            'imagename': actor,
            'dataset': self.name,
            'flame': flame,
        }
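Each `BaseDataset` item is a dictionary holding `K` stacked images, their ArcFace inputs, and the FLAME parameters repeated `K` times. A minimal sketch for inspecting one sample, assuming a local `LYHM` subset laid out under the configured dataset root:

```python
# Sketch: inspect a single BaseDataset sample. The subset name and dataset
# root must match your local MICA dataset layout.
from configs.config import get_cfg_defaults
from datasets.base import BaseDataset

cfg = get_cfg_defaults().dataset
sample_set = BaseDataset(name='LYHM', config=cfg, device='cpu', isEval=False)
sample = sample_set[0]

print(sample['imagename'], sample['dataset'])
print(sample['image'].shape)                    # (K, 3, H, W) stacked crops
print(sample['arcface'].shape)                  # (K, ...) ArcFace network inputs
print(sample['flame']['shape_params'].shape)    # (K, 300)
```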
src/pixel3dmm/preprocessing/MICA/datasets/creation/__init__.py
ADDED
File without changes
src/pixel3dmm/preprocessing/MICA/datasets/creation/generator.py
ADDED
@@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import os
from glob import glob
from multiprocessing import Pool
from pathlib import Path
from typing import List

import cv2
import numpy as np
from insightface.app import FaceAnalysis
from insightface.app.common import Face
from insightface.utils import face_align
from loguru import logger
from tqdm import tqdm

from datasets.creation.instances.instance import Instance
from datasets.creation.util import get_image, get_center, get_arcface_input


def _transfer(src, dst):
    src.parent.mkdir(parents=True, exist_ok=True)
    dst.parent.mkdir(parents=True, exist_ok=True)
    os.system(f'cp {str(src)} {str(dst)}')


def _copy(payload):
    instance, func, target, transform_path = payload
    files = func()
    for actor in files.keys():
        for file in files[actor]:
            _transfer(Path(file), Path(instance.get_dst(), target, actor, transform_path(file)))


class Generator:
    def __init__(self, instances):
        self.instances: List[Instance] = instances
        self.ARCFACE = 'arcface_input'

    def copy(self):
        logger.info('Start copying...')
        for instance in tqdm(self.instances):
            payloads = [(instance, instance.get_images, 'images', instance.transform_path)]
            with Pool(processes=len(payloads)) as pool:
                for _ in tqdm(pool.imap_unordered(_copy, payloads), total=len(payloads)):
                    pass

    def preprocess(self):
        logger.info('Start preprocessing...')
        for instance in tqdm(self.instances):
            instance.preprocess()

    def arcface(self):
        app = FaceAnalysis(name='antelopev2', providers=['CUDAExecutionProvider'])
        app.prepare(ctx_id=0, det_size=(224, 224))

        logger.info('Start arcface...')
        for instance in tqdm(self.instances):
            src = instance.get_dst()
            for image_path in tqdm(sorted(glob(f'{src}/images/*/*'))):
                dst = image_path.replace('images', self.ARCFACE)
                Path(dst).parent.mkdir(exist_ok=True, parents=True)
                for img in instance.transform_image(get_image(image_path[0:-4])):
                    bboxes, kpss = app.det_model.detect(img, max_num=0, metric='default')
                    if bboxes.shape[0] == 0:
                        continue
                    i = get_center(bboxes, img)
                    bbox = bboxes[i, 0:4]
                    det_score = bboxes[i, 4]
                    if det_score < instance.get_min_det_score():
                        continue
                    kps = None
                    if kpss is not None:
                        kps = kpss[i]
                    face = Face(bbox=bbox, kps=kps, det_score=det_score)
                    blob, aimg = get_arcface_input(face, img)
                    np.save(dst[0:-4], blob)
                    cv2.imwrite(dst, face_align.norm_crop(img, landmark=face.kps, image_size=224))

    def run(self):
        self.copy()
        self.preprocess()
        self.arcface()
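`Generator` chains `copy`, `preprocess`, and `arcface` over a list of `Instance` subclasses (the concrete subclasses follow below). A minimal sketch of driving it; `datasets/creation/main.py` is not rendered in this view, so its exact invocation may differ:

```python
# Sketch: run the dataset-creation pipeline for two subsets.
from datasets.creation.generator import Generator
from datasets.creation.instances.bu3dfe import BU3DFE
from datasets.creation.instances.facewarehouse import FaceWarehouse

generator = Generator([BU3DFE(), FaceWarehouse()])
generator.run()   # copy images, preprocess meshes, compute ArcFace crops
```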
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/__init__.py
ADDED
File without changes
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/bu3dfe.py
ADDED
@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

from pytorch3d.io import load_objs_as_meshes

from datasets.creation.instances.instance import Instance


class BU3DFE(Instance, ABC):
    def __init__(self):
        super(BU3DFE, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/BU3DFE/'
        self.src = '/scratch/NFC/BU-3DFE/'

    def get_images(self):
        images = {}
        for actor in sorted(glob(self.get_src().replace('BU-3DFE', 'BU-3DFE_clean') + 'images/*')):
            images[Path(actor).name] = glob(f'{actor}/*.jpg')

        return images

    def get_flame_params(self):
        prams = {}
        for actor in sorted(glob(self.get_src() + 'FLAME_parameters/iter2/*')):
            prams[Path(actor).name] = glob(f'{actor}/*.npz')

        return prams

    def get_registrations(self):
        registrations = {}
        for actor in sorted(glob(self.get_src() + 'registrations/iter2/neutral_align/*')):
            registrations[Path(actor).name] = glob(f'{actor}/*.obj')

        return registrations

    def get_meshes(self):
        meshes = {}
        files = sorted(glob(self.get_src() + 'raw_ne_data/*'))
        actors = set(map(lambda f: Path(f).name[0:5], files))
        for actor in actors:
            meshes[Path(actor).name] = next(filter(lambda f: actor in f and 'obj' in f, files))

        return meshes

    def transform_mesh(self, path):
        self.update_obj(path)
        mesh = load_objs_as_meshes([path], device=self.device)
        vertices = mesh._verts_list[0]
        center = vertices.mean(0)
        mesh._verts_list = [vertices - center]
        mesh.scale_verts_(0.01)

        return mesh.clone()
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/d3dfacs.py
ADDED
@@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

from datasets.creation.instances.instance import Instance


class D3DFACS(Instance, ABC):
    def __init__(self):
        super(D3DFACS, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/D3DFACS/'
        self.src = '/home/wzielonka/datasets/D3DFACS/'

    def get_images(self):
        images = {}
        for file in sorted(glob(self.get_src() + 'processed/images/*')):
            actor = Path(file).stem
            images[actor] = glob(f'{file}/*.jpg')

        return images

    def get_flame_params(self):
        params = {}
        for file in sorted(glob(self.get_src() + 'processed/FLAME/*.npz')):
            actor = Path(file).stem
            params[actor] = [file]

        return params

    def get_registrations(self):
        registrations = {}
        for file in sorted(glob(self.get_src() + 'processed/registrations/*')):
            actor = Path(file).stem.split('_')[0]
            registrations[actor] = [file]

        return registrations
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/facewarehouse.py
ADDED
@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

from datasets.creation.instances.instance import Instance


class FaceWarehouse(Instance, ABC):
    def __init__(self):
        super(FaceWarehouse, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/FACEWAREHOUSE/'
        self.src = '/scratch/NFC/FaceWarehouse/'

    def get_images(self):
        images = {}
        for actor in sorted(glob(self.get_src() + 'Images/*')):
            images[Path(actor).stem] = glob(f'{actor}/*.png')

        return images

    def get_flame_params(self):
        params = {}
        for actor in sorted(glob(self.get_src() + 'FLAME_fits/*')):
            params[Path(actor).stem] = [sorted(glob(f'{actor}/*.npz'))[0]]

        return params

    def get_registrations(self):
        registrations = {}
        for actor in sorted(glob(self.get_src() + 'FLAME_fits/*')):
            registrations[Path(actor).stem] = [f'{actor}/tmp/pose_0__def_trafo_fit.obj']

        return registrations
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/florence.py
ADDED
@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

import numpy as np

from datasets.creation.instances.instance import Instance


class Florence(Instance, ABC):
    def __init__(self):
        super(Florence, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/FLORENCE/'
        self.src = '/scratch/NFC/MICC_Florence/'

    def get_min_det_score(self):
        return 0.85

    def get_images(self):
        images = {}
        for actor in sorted(glob(self.get_src() + 'images/*')):
            imgs = sorted(list(filter(lambda f: 'PTZ-Outdoor' not in f, glob(f'{actor}/*/*.jpg'))))
            indecies = np.random.choice(len(imgs), 1000, replace=False)
            images[Path(actor).stem] = [imgs[i] for i in indecies]

        return images

    def get_flame_params(self):
        params = {}
        for actor in sorted(glob(self.get_src() + 'FLAME_parameters/iter1/*')):
            params[Path(actor).stem] = glob(f'{actor}/*.npz')

        return params

    def get_registrations(self):
        registrations = {}
        for actor in sorted(glob(self.get_src() + 'registrations/iter1/*')):
            if 'rendering' in actor:
                continue
            registrations[Path(actor).stem] = glob(f'{actor}/*.obj')

        return registrations
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/frgc.py
ADDED
@@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

import numpy as np
from pytorch3d.io import load_objs_as_meshes

from datasets.creation.instances.instance import Instance


class FRGC(Instance, ABC):
    def __init__(self):
        super(FRGC, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/FRGC/'
        self.src = '/scratch/NFC/FRGC_v2/'

    def get_images(self):
        images = {}
        for actor in sorted(glob(self.get_src() + 'images/*')):
            imgs = list(filter(lambda f: 'Spring2003range' not in f, glob(f'/{actor}/*/*.jpg')))
            images[Path(actor).name] = imgs

        return images

    def get_flame_params(self):
        prams = {}
        for actor in sorted(glob(self.get_src() + 'FLAME_parameters/*')):
            prams[Path(actor).name] = glob(f'/{actor}/*.npz')

        return prams

    def get_registrations(self):
        registrations = {}
        for actor in sorted(glob(self.get_src() + 'registrations/*')):
            registrations[Path(actor).name] = glob(f'/{actor}/*.obj')

        return registrations

    def get_meshes(self):
        meshes = {}
        for file in sorted(glob(self.get_src() + 'registrations_tmp_new/*')):
            meshes[Path(file).name] = glob(f'/{file}/*.obj')

        sessions = np.load('/home/wzielonka/documents/scans_to_session.npy', allow_pickle=True)[()]
        valid = []
        for key in sessions.keys():
            if 'Spring2003range' not in sessions[key]:
                valid.append(key)

        filtered = {}
        for actor in meshes.keys():
            files = meshes[actor]
            selected = list(filter(lambda f: Path(f).stem in valid, files))
            if len(selected) > 0:
                filtered[actor] = selected

        return filtered

    def transform_mesh(self, path):
        self.update_obj(path[0])
        mesh = load_objs_as_meshes(path, device=self.device)
        mesh.scale_verts_(10.0)
        vertices = mesh._verts_list[0]
        center = vertices.mean(0)
        mesh._verts_list = [vertices - center]

        return mesh.clone()
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/instance.py
ADDED
@@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import os
from abc import abstractmethod
from pathlib import Path

from pytorch3d.transforms import RotateAxisAngle


class Instance:
    def __init__(self):
        self.mount = '/home/wzielonka/Cluster/lustre'
        self.dst = 'empty'
        self.src = 'empty'
        self.device = 'cuda:0'
        self.actors = []
        self.use_mount = os.path.exists(self.mount)

    def get_dst(self):
        return self.dst if not self.use_mount else self.mount + self.dst

    def get_src(self):
        return self.src if not self.use_mount else self.mount + self.src

    @abstractmethod
    def get_min_det_score(self):
        return 0

    @abstractmethod
    def preprocess(self):
        pass

    @abstractmethod
    def get_images(self):
        return {}

    @abstractmethod
    def get_flame_params(self):
        return {}

    @abstractmethod
    def get_registrations(self):
        return {}

    @abstractmethod
    def get_meshes(self):
        return {}

    @abstractmethod
    def transform_mesh(self, path):
        return None

    @abstractmethod
    def transform_image(self, img):
        return [img]

    @abstractmethod
    def transform_path(self, file):
        return Path(file).name

    @abstractmethod
    def get_rotations(self):
        rots = {}
        degree = 2.5
        step = int(15 / degree / 2)
        X = range(-step, step + 1)
        degree = 8.0
        step = int(144 / degree / 2)
        Y = range(-step, step + 1)
        for a, angles in [('X', X), ('Y', Y)]:
            r = []
            for i in angles:
                r.append((RotateAxisAngle(float(degree * i), axis=a, device=self.device), float(degree * i)))
            rots[a] = r
        return rots

    @abstractmethod
    def update_obj(self, path, fix_mtl=False):
        mesh = Path(path).stem
        with open(path, 'r') as file:
            filedata = file.readlines()

        input = []
        for line in filedata:
            if 'usemtl' in line or 'newmtl' in line:
                continue
            input.append(line)

        output = []
        for line in input:
            if 'mtllib' in line:
                mtl = line.split(' ')[-1].split('.')[0]
                line += f'usemtl {mtl}\n'
            output.append(line)
        with open(path, 'w') as file:
            file_lines = "".join(output)
            file.write(file_lines)

        if not fix_mtl:
            return

        with open(path.replace('obj', 'mtl'), 'r') as file:
            filedata = file.readlines()

        output = []
        for line in filedata:
            if 'newmtl' in line:
                line = 'newmtl ' + mesh + '\n'
            output.append(line)
        with open(path.replace('obj', 'mtl'), 'w') as file:
            file_lines = "".join(output)
            file.write(file_lines)
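Every dataset in this directory follows the same pattern against this base class: point `src`/`dst` at the dataset's folder layout and override the getters that apply (each returning a dict keyed by actor name). A minimal sketch of how a further dataset would plug in; `NewSet` and its paths are placeholders, not files from this upload:

    from abc import ABC
    from glob import glob
    from pathlib import Path

    from datasets.creation.instances.instance import Instance


    class NewSet(Instance, ABC):
        def __init__(self):
            super(NewSet, self).__init__()
            self.dst = '/scratch/NFC/OnFlame/NEWSET/'  # placeholder output root
            self.src = '/scratch/NFC/NewSet/'          # placeholder raw-data root

        def get_images(self):
            # one entry per actor: {actor_name: [image paths]}
            return {Path(a).name: glob(f'{a}/*.jpg')
                    for a in sorted(glob(self.get_src() + 'images/*'))}

Note that `get_rotations` reassigns `degree` to 8.0 before the loop that builds both sweeps, so the X-axis angles end up as 8° multiples over roughly ±24° and the Y-axis angles over roughly ±72°.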
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/lyhm.py
ADDED
@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.transforms import RotateAxisAngle

from datasets.creation.instances.instance import Instance


class LYHM(Instance, ABC):
    def __init__(self):
        super(LYHM, self).__init__()
        self.dst = '/scratch/NFC/MICA/LYHM/'
        self.src = '/scratch/NFC/LYHM/'

    def get_images(self):
        images = {}
        for actor in sorted(glob(self.get_src() + '/*')):
            images[Path(actor).name] = glob(f'/{actor}/*.png')

        return images

    def get_flame_params(self):
        prams = {}
        for actor in sorted(glob(self.get_src() + '/*')):
            prams[Path(actor).name] = glob(f'/{actor}/*.npz')

        return prams

    def get_registrations(self):
        registrations = {}
        for actor in sorted(glob(self.get_src() + '/*')):
            all = glob(f'/{actor}/*.obj')
            registrations[Path(actor).name] = list(filter(lambda m: 'model_fit' not in m, all))

        return registrations

    def get_meshes(self):
        meshes = {}
        for actor in sorted(glob(self.get_src() + '/*')):
            meshes[Path(actor).name] = glob(f'/{actor}/scan/*.obj')

        return meshes

    def transform_mesh(self, path):
        mesh = load_objs_as_meshes(path, device=self.device)
        vertices = mesh._verts_list[0]
        center = vertices.mean(0)
        mesh._verts_list = [vertices - center]
        mesh.scale_verts_(0.01)

        rot = RotateAxisAngle(-45, axis='X', device=self.device)
        mesh._verts_list = [rot.transform_points(mesh.verts_list()[0])]
        rot = RotateAxisAngle(-45, axis='Y', device=self.device)
        mesh._verts_list = [rot.transform_points(mesh.verts_list()[0])]

        return mesh.clone()
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/pb4d.py
ADDED
@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

import numpy as np
from pytorch3d.io import load_objs_as_meshes

from datasets.creation.instances.instance import Instance


class PB4D(Instance, ABC):
    def __init__(self):
        super(PB4D, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/BP4D/'
        self.src = '/scratch/NFC/BP4D/'

    def get_images(self):
        images = {}
        for actor in sorted(glob(self.get_src() + 'images/*')):
            imgs = sorted(glob(f'/{actor}/*.jpg'))
            indecies = np.random.choice(len(imgs), 100, replace=False)
            images[Path(actor).name] = [imgs[i] for i in indecies]

        return images

    def get_flame_params(self):
        prams = {}
        for file in sorted(glob(self.get_src() + 'FLAME_parameters/*.npz')):
            prams[Path(file).stem] = [file]

        return prams

    def get_registrations(self):
        registrations = {}
        for file in sorted(glob(self.get_src() + 'registrations/*')):
            registrations[Path(file).stem] = [file]

        return registrations

    def get_meshes(self):
        meshes = {}
        for file in sorted(glob(self.get_src() + 'scans/*.obj')):
            meshes[Path(file).stem] = [file]

        return meshes

    def transform_mesh(self, path):
        mesh = load_objs_as_meshes(path, device=self.device)
        mesh.scale_verts_(0.01)
        vertices = mesh._verts_list[0]
        center = vertices.mean(0)
        mesh._verts_list = [vertices - center]

        return mesh.clone()
src/pixel3dmm/preprocessing/MICA/datasets/creation/instances/stirling.py
ADDED
@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


from abc import ABC
from glob import glob
from pathlib import Path

from pytorch3d.io import load_objs_as_meshes

from datasets.creation.instances.instance import Instance


class Stirling(Instance, ABC):
    def __init__(self):
        super(Stirling, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/STIRLING/'
        self.src = '/scratch/NFC/Stirling/'

    def get_min_det_score(self):
        return 0.75

    def get_images(self):
        images = {}
        for file in sorted(glob(self.get_src() + 'images/Real_images__Subset_2D_FG2018/HQ/*')):
            actor = Path(file).stem.split('_')[0].upper()
            if actor not in images:
                images[actor] = []
            images[actor].append(file)

        return images

    def get_flame_params(self):
        prams = {}
        for file in sorted(glob(self.get_src() + 'FLAME_parameters/iter1/*/*.npz')):
            actor = Path(file).stem[0:5].upper()
            prams[Path(actor).name] = [file]

        return prams

    def get_registrations(self):
        registrations = {}
        for file in sorted(glob(self.get_src() + 'registrations/iter1/*/*')):
            if 'obj' not in file:
                continue
            actor = Path(file).stem[0:5].upper()
            registrations[Path(actor).name] = [file]

        return registrations

    def get_meshes(self):
        meshes = {}
        for file in sorted(glob(self.get_src() + 'scans/*/*.obj')):
            actor = Path(file).stem[0:5].upper()
            if 'obj' in file:
                meshes[actor] = file

        return meshes

    def transform_mesh(self, path):
        self.update_obj(path, fix_mtl=True)
        mesh = load_objs_as_meshes([path], device=self.device)
        vertices = mesh._verts_list[0]
        center = vertices.mean(0)
        mesh._verts_list = [vertices - center]
        mesh.scale_verts_(0.01)

        return mesh.clone()

    def transform_path(self, file):
        name = Path(file).name
        return name
src/pixel3dmm/preprocessing/MICA/datasets/creation/main.py
ADDED
@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import numpy as np
import torch

from datasets.creation.generator import Generator
from datasets.creation.instances.bu3dfe import BU3DFE
from datasets.creation.instances.d3dfacs import D3DFACS
from datasets.creation.instances.facewarehouse import FaceWarehouse
from datasets.creation.instances.florence import Florence
from datasets.creation.instances.frgc import FRGC
from datasets.creation.instances.lyhm import LYHM
from datasets.creation.instances.pb4d import PB4D
from datasets.creation.instances.stirling import Stirling

np.random.seed(42)

if __name__ == '__main__':
    torch.multiprocessing.set_start_method('spawn')

    datasets = [FaceWarehouse(), LYHM(), D3DFACS(), FRGC(), Florence(), Stirling(), BU3DFE(), PB4D()]
    generator = Generator([FaceWarehouse()])

    generator.run()
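The `datasets` list above enumerates every supported set, but the `Generator` here is instantiated with only `FaceWarehouse()`; the selection passed to `Generator` decides what actually gets processed. A minimal sketch, assuming the same imports as in `main.py`:

    # process LYHM and Stirling instead of FaceWarehouse
    generator = Generator([LYHM(), Stirling()])
    generator.run()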
src/pixel3dmm/preprocessing/MICA/datasets/creation/util.py
ADDED
@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import os
import os.path as osp
from pathlib import Path

import cv2
import numpy as np
from insightface.utils import face_align
from numpy.lib import math

input_mean = 127.5
input_std = 127.5


def create_folders(folders):
    if not type(folders) is list:
        folders = folders.split('/')
    parents = '/'
    for folder in folders:
        parents = os.path.join(parents, folder)
        if os.path.exists(parents):
            continue
        Path(parents).mkdir(exist_ok=True)


def get_arcface_input(face, img):
    aimg = face_align.norm_crop(img, landmark=face.kps)
    blob = cv2.dnn.blobFromImages([aimg], 1.0 / input_std, (112, 112), (input_mean, input_mean, input_mean), swapRB=True)
    return blob[0], aimg


def get_image(name, to_rgb=False):
    images_dir = osp.join(Path(__file__).parent.absolute(), '../images')
    ext_names = ['.jpg', '.png', '.jpeg']
    image_file = None
    for ext_name in ext_names:
        _image_file = osp.join(images_dir, "%s%s" % (name, ext_name))
        if osp.exists(_image_file):
            image_file = _image_file
            break
    assert image_file is not None, '%s not found' % name
    img = cv2.imread(image_file)
    if to_rgb:
        img = img[:, :, ::-1]
    return img


# from the original insightface.app.face_analysis.py file
def draw_on(img, faces):
    import cv2
    dimg = img.copy()
    for i in range(len(faces)):
        face = faces[i]
        box = face.bbox.astype(np.int)
        color = (0, 0, 255)
        cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
        if face.kps is not None:
            kps = face.kps.astype(np.int)
            # print(landmark.shape)
            for l in range(kps.shape[0]):
                color = (0, 0, 255)
                if l == 0 or l == 3:
                    color = (0, 255, 0)
                cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color, 2)
        if face.gender is not None and face.age is not None:
            cv2.putText(dimg, '%s,%d' % (face.sex, face.age), (box[0] - 1, box[1] - 4), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 1)

    return dimg


def dist(p1, p2):
    return math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))


def get_center(bboxes, img):
    img_center = img.shape[1] // 2, img.shape[0] // 2
    size = bboxes.shape[0]
    distance = np.Inf
    j = 0
    for i in range(size):
        x1, y1, x2, y2 = bboxes[i, 0:4]
        dx = abs(x2 - x1) / 2.0
        dy = abs(y2 - y1) / 2.0
        current = dist((x1 + dx, y1 + dy), img_center)
        if current < distance:
            distance = current
            j = i

    return j


def bbox2point(left, right, top, bottom, type='bbox'):
    if type == 'kpt68':
        old_size = (right - left + bottom - top) / 2 * 1.1
        center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
    elif type == 'bbox':
        old_size = (right - left + bottom - top) / 2
        center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])
    else:
        raise NotImplementedError
    return old_size, center


def get_bbox(image, lmks, bb_scale=1.0):
    h, w, c = image.shape
    bbox = []
    for i in range(lmks.shape[0]):
        lmks = lmks.astype(np.int32)
        x_min, x_max, y_min, y_max = np.min(lmks[i, :, 0]), np.max(lmks[i, :, 0]), np.min(lmks[i, :, 1]), np.max(lmks[i, :, 1])
        x_center, y_center = int((x_max + x_min) / 2.0), int((y_max + y_min) / 2.0)
        size = int(bb_scale * 2 * max(x_center - x_min, y_center - y_min))
        xb_min, xb_max, yb_min, yb_max = max(x_center - size // 2, 0), min(x_center + size // 2, w - 1), \
                                         max(y_center - size // 2, 0), min(y_center + size // 2, h - 1)

        yb_max = min(yb_max, h - 1)
        xb_max = min(xb_max, w - 1)
        yb_min = max(yb_min, 0)
        xb_min = max(xb_min, 0)

        if (xb_max - xb_min) % 2 != 0:
            xb_min += 1

        if (yb_max - yb_min) % 2 != 0:
            yb_min += 1

        # x1, y1, x2, y2
        bbox.append(np.array([xb_min, yb_min, xb_max, yb_max, 0]))

    return np.stack(bbox)
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/BP4D.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e625d679f72e3aa2e00ac4faed0b07d6957c94986a96746a4f8de29997e77bba
size 284773
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/BU3DFE.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f144740162e25f9067a73f298e8229de60a17eccf5ba5e56ab8f0789688518e
size 196059
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/D3DFACS.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0270c09ae15eec72a94cfbc3a1502b970330698e290a10f2c4b8176341f4ae87
size 207899
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/FACEWAREHOUSE.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e7e9f2bce6fce23e4f5861ebe71e0402bafdf6383217a9d20e1130787b01d3b
size 101433
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/FLORENCE.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:794583b78501cda2658aa1fac4061ac7f75271ee35d71f090127f3935491ce42
size 500567
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/FRGC.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dfa3e47a66f6f79b2a44091322c5b5680a32018774c2121d7e53f0f72a358c28
size 1232221
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/LYHM.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ab68b388abd4b5cf2dbc8cf46058ba8c26a171b8bbbff06d20082ba1b2780974
size 126716
src/pixel3dmm/preprocessing/MICA/datasets/image_paths/STIRLING.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4fcd43ec4af033f908c660a825a948621d071ac9fa7f0292943c3519dfd6805c
size 23761
src/pixel3dmm/preprocessing/MICA/demo.py
ADDED
@@ -0,0 +1,156 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]


import argparse
import os
import random
from glob import glob
from pathlib import Path

import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import trimesh
from insightface.app.common import Face
from insightface.utils import face_align
from loguru import logger
from skimage.io import imread
from tqdm import tqdm

from configs.config import get_cfg_defaults
from datasets.creation.util import get_arcface_input, get_center, draw_on
from utils import util
from utils.landmark_detector import LandmarksDetector, detectors


def deterministic(rank):
    torch.manual_seed(rank)
    torch.cuda.manual_seed(rank)
    np.random.seed(rank)
    random.seed(rank)

    cudnn.deterministic = True
    cudnn.benchmark = False


def process(args, app, image_size=224, draw_bbox=False):
    dst = Path(args.a)
    dst.mkdir(parents=True, exist_ok=True)
    processes = []
    image_paths = sorted(glob(args.i + '/*.*'))
    for image_path in tqdm(image_paths):
        name = Path(image_path).stem
        img = cv2.imread(image_path)
        bboxes, kpss = app.detect(img)
        if bboxes.shape[0] == 0:
            logger.error(f'[ERROR] Face not detected for {image_path}')
            continue
        i = get_center(bboxes, img)
        bbox = bboxes[i, 0:4]
        det_score = bboxes[i, 4]
        kps = None
        if kpss is not None:
            kps = kpss[i]
        face = Face(bbox=bbox, kps=kps, det_score=det_score)
        blob, aimg = get_arcface_input(face, img)
        file = str(Path(dst, name))
        np.save(file, blob)
        processes.append(file + '.npy')
        cv2.imwrite(file + '.jpg', face_align.norm_crop(img, landmark=face.kps, image_size=image_size))
        if draw_bbox:
            dimg = draw_on(img, [face])
            cv2.imwrite(file + '_bbox.jpg', dimg)

    return processes


def to_batch(path):
    src = path.replace('npy', 'jpg')
    if not os.path.exists(src):
        src = path.replace('npy', 'png')

    image = imread(src)[:, :, :3]
    image = image / 255.
    image = cv2.resize(image, (224, 224)).transpose(2, 0, 1)
    image = torch.tensor(image).cuda()[None]

    arcface = np.load(path)
    arcface = torch.tensor(arcface).cuda()[None]

    return image, arcface


def load_checkpoint(args, mica):
    checkpoint = torch.load(args.m)
    if 'arcface' in checkpoint:
        mica.arcface.load_state_dict(checkpoint['arcface'])
    if 'flameModel' in checkpoint:
        mica.flameModel.load_state_dict(checkpoint['flameModel'])


def main(cfg, args):
    device = 'cuda:0'
    cfg.model.testing = True
    mica = util.find_model_using_name(model_dir='micalib.models', model_name=cfg.model.name)(cfg, device)
    load_checkpoint(args, mica)
    mica.eval()

    faces = mica.flameModel.generator.faces_tensor.cpu()
    Path(args.o).mkdir(exist_ok=True, parents=True)

    app = LandmarksDetector(model=detectors.RETINAFACE)

    with torch.no_grad():
        logger.info(f'Processing has started...')
        paths = process(args, app, draw_bbox=False)
        for path in tqdm(paths):
            name = Path(path).stem
            images, arcface = to_batch(path)
            codedict = mica.encode(images, arcface)
            opdict = mica.decode(codedict)
            meshes = opdict['pred_canonical_shape_vertices']
            code = opdict['pred_shape_code']
            lmk = mica.flame.compute_landmarks(meshes)

            mesh = meshes[0]
            landmark_51 = lmk[0, 17:]
            landmark_7 = landmark_51[[19, 22, 25, 28, 16, 31, 37]]

            dst = Path(args.o, name)
            dst.mkdir(parents=True, exist_ok=True)
            trimesh.Trimesh(vertices=mesh.cpu() * 1000.0, faces=faces, process=False).export(f'{dst}/mesh.ply')  # save in millimeters
            trimesh.Trimesh(vertices=mesh.cpu() * 1000.0, faces=faces, process=False).export(f'{dst}/mesh.obj')
            np.save(f'{dst}/identity', code[0].cpu().numpy())
            np.save(f'{dst}/kpt7', landmark_7.cpu().numpy() * 1000.0)
            np.save(f'{dst}/kpt68', lmk.cpu().numpy() * 1000.0)

    logger.info(f'Processing finished. Results has been saved in {args.o}')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MICA - Towards Metrical Reconstruction of Human Faces')
    parser.add_argument('-i', default='demo/input', type=str, help='Input folder with images')
    parser.add_argument('-o', default='demo/output', type=str, help='Output folder')
    parser.add_argument('-a', default='demo/arcface', type=str, help='Processed images for MICA input')
    parser.add_argument('-m', default='data/pretrained/mica.tar', type=str, help='Pretrained model path')

    args = parser.parse_args()
    cfg = get_cfg_defaults()

    deterministic(42)
    main(cfg, args)
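With the four defaults above, `demo.py` reads images from `demo/input`, caches ArcFace crops in `demo/arcface`, and writes one folder per input image into `demo/output` (e.g. `python demo.py -i demo/input -o demo/output -a demo/arcface -m data/pretrained/mica.tar`). A minimal sketch of inspecting those per-image outputs; the `carell` folder name assumes the bundled `demo/input/carell.jpg` was processed with the default output path:

    import numpy as np
    import trimesh

    out = 'demo/output/carell'                             # one folder per input image
    mesh = trimesh.load(f'{out}/mesh.ply', process=False)  # vertices are stored in millimeters
    identity = np.load(f'{out}/identity.npy')              # predicted FLAME shape code
    kpt68 = np.load(f'{out}/kpt68.npy')                    # 68 3D landmarks, millimeters
    print(mesh.vertices.shape, identity.shape, kpt68.shape)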
src/pixel3dmm/preprocessing/MICA/demo/input/carell.jpg
ADDED (image preview omitted)
src/pixel3dmm/preprocessing/MICA/demo/input/connelly.jpg
ADDED (image preview omitted; stored via Git LFS)
src/pixel3dmm/preprocessing/MICA/demo/input/justin.png
ADDED (image preview omitted; stored via Git LFS)
src/pixel3dmm/preprocessing/MICA/demo/input/lawrence.jpg
ADDED (image preview omitted; stored via Git LFS)
src/pixel3dmm/preprocessing/MICA/documents/BP4D.gif
ADDED (image preview omitted; stored via Git LFS)
src/pixel3dmm/preprocessing/MICA/documents/D3DFACS.gif
ADDED (image preview omitted; stored via Git LFS)
src/pixel3dmm/preprocessing/MICA/documents/FACEWAREHOUSE.gif
ADDED (image preview omitted; stored via Git LFS)
src/pixel3dmm/preprocessing/MICA/documents/FLORENCE.gif
ADDED (image preview omitted; stored via Git LFS)