Commit 0e6de7e: Duplicate from Gradio-Blocks/DualStyleGAN

Co-authored-by: Mamta Narang <[email protected]>
- .gitattributes +28 -0
- .gitmodules +3 -0
- .pre-commit-config.yaml +37 -0
- .style.yapf +5 -0
- DualStyleGAN +1 -0
- README.md +16 -0
- app.py +196 -0
- dualstylegan.py +163 -0
- images/95UF6LXe-Lo.jpg +3 -0
- images/ILip77SbmOE.jpg +3 -0
- images/README.md +7 -0
- images/et_78QkMMQs.jpg +3 -0
- images/rDEOVtE7vOs.jpg +3 -0
- packages.txt +2 -0
- patch +38 -0
- requirements.txt +7 -0
- style.css +17 -0
.gitattributes
ADDED
@@ -0,0 +1,28 @@
*.jpg filter=lfs diff=lfs merge=lfs -text
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitmodules
ADDED
@@ -0,0 +1,3 @@
[submodule "DualStyleGAN"]
    path = DualStyleGAN
    url = https://github.com/williamyang1991/DualStyleGAN
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,37 @@
exclude: ^patch
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.2.0
    hooks:
      - id: check-executables-have-shebangs
      - id: check-json
      - id: check-merge-conflict
      - id: check-shebang-scripts-are-executable
      - id: check-toml
      - id: check-yaml
      - id: double-quote-string-fixer
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: ['--fix=lf']
      - id: requirements-txt-fixer
      - id: trailing-whitespace
  - repo: https://github.com/myint/docformatter
    rev: v1.4
    hooks:
      - id: docformatter
        args: ['--in-place']
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v0.991
    hooks:
      - id: mypy
        args: ['--ignore-missing-imports']
        additional_dependencies: ['types-python-slugify']
  - repo: https://github.com/google/yapf
    rev: v0.32.0
    hooks:
      - id: yapf
        args: ['--parallel', '--in-place']
.style.yapf
ADDED
@@ -0,0 +1,5 @@
[style]
based_on_style = pep8
blank_line_before_nested_class_or_def = false
spaces_before_comment = 2
split_before_logical_operator = true
DualStyleGAN
ADDED
@@ -0,0 +1 @@
Subproject commit 64285b179d0929e301a97c2f2c438546ff49e20d
README.md
ADDED
@@ -0,0 +1,16 @@
---
title: Portrait Style Transfer
emoji: 😻
colorFrom: purple
colorTo: red
sdk: gradio
sdk_version: 3.35.2
app_file: app.py
pinned: false
suggested_hardware: t4-small
duplicated_from: Gradio-Blocks/DualStyleGAN
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference

https://arxiv.org/abs/2203.13248
app.py
ADDED
@@ -0,0 +1,196 @@
#!/usr/bin/env python

from __future__ import annotations

import argparse
import pathlib

import gradio as gr

from dualstylegan import Model

DESCRIPTION = '''# Portrait Style Transfer with <a href="https://github.com/williamyang1991/DualStyleGAN">DualStyleGAN</a>

<img id="overview" alt="overview" src="https://raw.githubusercontent.com/williamyang1991/DualStyleGAN/main/doc_images/overview.jpg" />
'''


def get_style_image_url(style_name: str) -> str:
    base_url = 'https://raw.githubusercontent.com/williamyang1991/DualStyleGAN/main/doc_images'
    filenames = {
        'cartoon': 'cartoon_overview.jpg',
        'caricature': 'caricature_overview.jpg',
        'anime': 'anime_overview.jpg',
        'arcane': 'Reconstruction_arcane_overview.jpg',
        'comic': 'Reconstruction_comic_overview.jpg',
        'pixar': 'Reconstruction_pixar_overview.jpg',
        'slamdunk': 'Reconstruction_slamdunk_overview.jpg',
    }
    return f'{base_url}/{filenames[style_name]}'


def get_style_image_markdown_text(style_name: str) -> str:
    url = get_style_image_url(style_name)
    return f'<center><img id="style-image" src="{url}" alt="style image"></center>'


def update_slider(choice: str) -> dict:
    max_vals = {
        'cartoon': 316,
        'caricature': 198,
        'anime': 173,
        'arcane': 99,
        'comic': 100,
        'pixar': 121,
        'slamdunk': 119,
    }
    return gr.Slider.update(maximum=max_vals[choice])


def update_style_image(style_name: str) -> dict:
    text = get_style_image_markdown_text(style_name)
    return gr.Markdown.update(value=text)


def set_example_image(example: list) -> dict:
    return gr.Image.update(value=example[0])


def set_example_styles(example: list) -> list[dict]:
    return [
        gr.Radio.update(value=example[0]),
        gr.Slider.update(value=example[1]),
    ]


def set_example_weights(example: list) -> list[dict]:
    return [
        gr.Slider.update(value=example[0]),
        gr.Slider.update(value=example[1]),
    ]


model = Model()

with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Box():
        gr.Markdown('''## Step 1 (Preprocess Input Image)

- Drop an image containing a near-frontal face to the **Input Image**.
- If there are multiple faces in the image, hit the Edit button in the upper right corner and crop the input image beforehand.
- Hit the **Detect & Align Face** button.
- Hit the **Reconstruct Face** button.
- The final result will be based on this **Reconstructed Face**. So, if the reconstructed image is not satisfactory, you may want to change the input image.
''')
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_image = gr.Image(label='Input Image',
                                           type='filepath')
                with gr.Row():
                    detect_button = gr.Button('Detect & Align Face')
            with gr.Column():
                with gr.Row():
                    aligned_face = gr.Image(label='Aligned Face',
                                            type='numpy',
                                            interactive=False)
                with gr.Row():
                    reconstruct_button = gr.Button('Reconstruct Face')
            with gr.Column():
                reconstructed_face = gr.Image(label='Reconstructed Face',
                                              type='numpy')
                instyle = gr.Variable()

        with gr.Row():
            paths = sorted(pathlib.Path('images').glob('*.jpg'))
            gr.Examples(examples=[[path.as_posix()] for path in paths],
                        inputs=input_image)

    with gr.Box():
        gr.Markdown('''## Step 2 (Select Style Image)

- Select **Style Type**.
- Select **Style Image Index** from the image table below.
''')
        with gr.Row():
            with gr.Column():
                style_type = gr.Radio(label='Style Type',
                                      choices=model.style_types)
                text = get_style_image_markdown_text('cartoon')
                style_image = gr.Markdown(value=text)
                style_index = gr.Slider(label='Style Image Index',
                                        minimum=0,
                                        maximum=316,
                                        step=1,
                                        value=26)

        with gr.Row():
            gr.Examples(examples=[
                ['cartoon', 26],
                ['caricature', 65],
                ['arcane', 63],
                ['pixar', 80],
            ],
                        inputs=[style_type, style_index])

    with gr.Box():
        gr.Markdown('''## Step 3 (Generate Style Transferred Image)

- Adjust **Structure Weight** and **Color Weight**.
- These are weights for the style image, so the larger the value, the closer the resulting image will be to the style image.
- Hit the **Generate** button.
''')
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    structure_weight = gr.Slider(label='Structure Weight',
                                                 minimum=0,
                                                 maximum=1,
                                                 step=0.1,
                                                 value=0.6)
                with gr.Row():
                    color_weight = gr.Slider(label='Color Weight',
                                             minimum=0,
                                             maximum=1,
                                             step=0.1,
                                             value=1)
                with gr.Row():
                    structure_only = gr.Checkbox(label='Structure Only')
                with gr.Row():
                    generate_button = gr.Button('Generate')

            with gr.Column():
                result = gr.Image(label='Result')

        with gr.Row():
            gr.Examples(examples=[
                [0.6, 1.0],
                [0.3, 1.0],
                [0.0, 1.0],
                [1.0, 0.0],
            ],
                        inputs=[structure_weight, color_weight])

    detect_button.click(fn=model.detect_and_align_face,
                        inputs=input_image,
                        outputs=aligned_face)
    reconstruct_button.click(fn=model.reconstruct_face,
                             inputs=aligned_face,
                             outputs=[reconstructed_face, instyle])
    style_type.change(fn=update_slider, inputs=style_type, outputs=style_index)
    style_type.change(fn=update_style_image,
                      inputs=style_type,
                      outputs=style_image)
    generate_button.click(fn=model.generate,
                          inputs=[
                              style_type,
                              style_index,
                              structure_weight,
                              color_weight,
                              structure_only,
                              instyle,
                          ],
                          outputs=result)
demo.queue(max_size=10).launch()
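Note on the pattern used above: app.py keeps the UI consistent with Gradio 3.x component `update` dicts. When **Style Type** changes, `update_slider` returns `gr.Slider.update(maximum=...)` so the **Style Image Index** slider is capped at the size of the selected style collection, and `update_style_image` swaps the preview table. A minimal sketch of that pattern, assuming Gradio 3.x as pinned in README.md; the component names and the reduced style table here are illustrative, not part of the commit:

    import gradio as gr

    MAX_INDEX = {'cartoon': 316, 'caricature': 198}  # illustrative subset of the real table

    def limit_slider(choice: str) -> dict:
        # Returning an update dict changes only the listed properties of the target component.
        return gr.Slider.update(maximum=MAX_INDEX[choice], value=0)

    with gr.Blocks() as sketch:
        style = gr.Radio(label='Style Type', choices=list(MAX_INDEX), value='cartoon')
        index = gr.Slider(label='Style Image Index', minimum=0, maximum=316, step=1)
        # Each change of the radio re-bounds the slider, exactly as style_type.change does above.
        style.change(fn=limit_slider, inputs=style, outputs=index)

    if __name__ == '__main__':
        sketch.launch()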
dualstylegan.py
ADDED
@@ -0,0 +1,163 @@
from __future__ import annotations

import argparse
import os
import pathlib
import shlex
import subprocess
import sys
from typing import Callable

import dlib
import huggingface_hub
import numpy as np
import PIL.Image
import torch
import torch.nn as nn
import torchvision.transforms as T

if os.getenv('SYSTEM') == 'spaces' and not torch.cuda.is_available():
    with open('patch') as f:
        subprocess.run(shlex.split('patch -p1'), cwd='DualStyleGAN', stdin=f)

app_dir = pathlib.Path(__file__).parent
submodule_dir = app_dir / 'DualStyleGAN'
sys.path.insert(0, submodule_dir.as_posix())

from model.dualstylegan import DualStyleGAN
from model.encoder.align_all_parallel import align_face
from model.encoder.psp import pSp


class Model:
    def __init__(self):
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.landmark_model = self._create_dlib_landmark_model()
        self.encoder = self._load_encoder()
        self.transform = self._create_transform()

        self.style_types = [
            'cartoon',
            'caricature',
            'anime',
            'arcane',
            'comic',
            'pixar',
            'slamdunk',
        ]
        self.generator_dict = {
            style_type: self._load_generator(style_type)
            for style_type in self.style_types
        }
        self.exstyle_dict = {
            style_type: self._load_exstylecode(style_type)
            for style_type in self.style_types
        }

    @staticmethod
    def _create_dlib_landmark_model():
        path = huggingface_hub.hf_hub_download(
            'public-data/dlib_face_landmark_model',
            'shape_predictor_68_face_landmarks.dat')
        return dlib.shape_predictor(path)

    def _load_encoder(self) -> nn.Module:
        ckpt_path = huggingface_hub.hf_hub_download('public-data/DualStyleGAN',
                                                    'models/encoder.pt')
        ckpt = torch.load(ckpt_path, map_location='cpu')
        opts = ckpt['opts']
        opts['device'] = self.device.type
        opts['checkpoint_path'] = ckpt_path
        opts = argparse.Namespace(**opts)
        model = pSp(opts)
        model.to(self.device)
        model.eval()
        return model

    @staticmethod
    def _create_transform() -> Callable:
        transform = T.Compose([
            T.Resize(256),
            T.CenterCrop(256),
            T.ToTensor(),
            T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ])
        return transform

    def _load_generator(self, style_type: str) -> nn.Module:
        model = DualStyleGAN(1024, 512, 8, 2, res_index=6)
        ckpt_path = huggingface_hub.hf_hub_download(
            'public-data/DualStyleGAN', f'models/{style_type}/generator.pt')
        ckpt = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(ckpt['g_ema'])
        model.to(self.device)
        model.eval()
        return model

    @staticmethod
    def _load_exstylecode(style_type: str) -> dict[str, np.ndarray]:
        if style_type in ['cartoon', 'caricature', 'anime']:
            filename = 'refined_exstyle_code.npy'
        else:
            filename = 'exstyle_code.npy'
        path = huggingface_hub.hf_hub_download(
            'public-data/DualStyleGAN', f'models/{style_type}/{filename}')
        exstyles = np.load(path, allow_pickle=True).item()
        return exstyles

    def detect_and_align_face(self, image: str) -> np.ndarray:
        image = align_face(filepath=image, predictor=self.landmark_model)
        return image

    @staticmethod
    def denormalize(tensor: torch.Tensor) -> torch.Tensor:
        return torch.clamp((tensor + 1) / 2 * 255, 0, 255).to(torch.uint8)

    def postprocess(self, tensor: torch.Tensor) -> np.ndarray:
        tensor = self.denormalize(tensor)
        return tensor.cpu().numpy().transpose(1, 2, 0)

    @torch.inference_mode()
    def reconstruct_face(self,
                         image: np.ndarray) -> tuple[np.ndarray, torch.Tensor]:
        image = PIL.Image.fromarray(image)
        input_data = self.transform(image).unsqueeze(0).to(self.device)
        img_rec, instyle = self.encoder(input_data,
                                        randomize_noise=False,
                                        return_latents=True,
                                        z_plus_latent=True,
                                        return_z_plus_latent=True,
                                        resize=False)
        img_rec = torch.clamp(img_rec.detach(), -1, 1)
        img_rec = self.postprocess(img_rec[0])
        return img_rec, instyle

    @torch.inference_mode()
    def generate(self, style_type: str, style_id: int, structure_weight: float,
                 color_weight: float, structure_only: bool,
                 instyle: torch.Tensor) -> np.ndarray:
        generator = self.generator_dict[style_type]
        exstyles = self.exstyle_dict[style_type]

        style_id = int(style_id)
        stylename = list(exstyles.keys())[style_id]

        latent = torch.tensor(exstyles[stylename]).to(self.device)
        if structure_only:
            latent[0, 7:18] = instyle[0, 7:18]
        exstyle = generator.generator.style(
            latent.reshape(latent.shape[0] * latent.shape[1],
                           latent.shape[2])).reshape(latent.shape)

        img_gen, _ = generator([instyle],
                               exstyle,
                               z_plus_latent=True,
                               truncation=0.7,
                               truncation_latent=0,
                               use_res=True,
                               interp_weights=[structure_weight] * 7 +
                               [color_weight] * 11)
        img_gen = torch.clamp(img_gen.detach(), -1, 1)
        img_gen = self.postprocess(img_gen[0])
        return img_gen
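Note on using the wrapper outside Gradio: the three public Model methods compose in the same order as the button callbacks in app.py, i.e. align, then invert with the pSp encoder, then stylize. A minimal sketch, assuming the checkpoints download from the Hub and the DualStyleGAN submodule is present as in this Space; the conversion step and the output filename are illustrative additions, not part of the commit:

    import numpy as np
    import PIL.Image

    from dualstylegan import Model

    model = Model()  # downloads the dlib, pSp and DualStyleGAN checkpoints on first use

    # 1. Detect and align the face from an input file path (one of the bundled sample images).
    aligned = model.detect_and_align_face('images/ILip77SbmOE.jpg')
    aligned = np.asarray(aligned)  # Gradio passes a numpy image between callbacks; mirror that here

    # 2. Invert the aligned face into a z+ latent with the pSp encoder.
    reconstructed, instyle = model.reconstruct_face(aligned)

    # 3. Transfer the chosen style: the first 7 interpolation weights control structure,
    #    the remaining 11 control color (see interp_weights in generate above).
    result = model.generate(style_type='cartoon',
                            style_id=26,
                            structure_weight=0.6,
                            color_weight=1.0,
                            structure_only=False,
                            instyle=instyle)
    PIL.Image.fromarray(result).save('result.png')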
images/95UF6LXe-Lo.jpg
ADDED
(binary image stored with Git LFS)
images/ILip77SbmOE.jpg
ADDED
(binary image stored with Git LFS)
images/README.md
ADDED
@@ -0,0 +1,7 @@
These images are freely-usable ones from [Unsplash](https://unsplash.com/).

- https://unsplash.com/photos/rDEOVtE7vOs
- https://unsplash.com/photos/et_78QkMMQs
- https://unsplash.com/photos/ILip77SbmOE
- https://unsplash.com/photos/95UF6LXe-Lo
images/et_78QkMMQs.jpg
ADDED
(binary image stored with Git LFS)
images/rDEOVtE7vOs.jpg
ADDED
(binary image stored with Git LFS)
packages.txt
ADDED
@@ -0,0 +1,2 @@
cmake
ninja-build
patch
ADDED
@@ -0,0 +1,38 @@
diff --git a/model/stylegan/op/fused_act.py b/model/stylegan/op/fused_act.py
index c3b735c..67e7972 100644
--- a/model/stylegan/op/fused_act.py
+++ b/model/stylegan/op/fused_act.py
@@ -7,14 +7,6 @@ from torch.autograd import Function
 from torch.utils.cpp_extension import load


-module_path = os.path.dirname(__file__)
-fused = load(
-    "fused",
-    sources=[
-        os.path.join(module_path, "fused_bias_act.cpp"),
-        os.path.join(module_path, "fused_bias_act_kernel.cu"),
-    ],
-)


 class FusedLeakyReLUFunctionBackward(Function):
diff --git a/model/stylegan/op/upfirdn2d.py b/model/stylegan/op/upfirdn2d.py
index 67e0375..97565d7 100644
--- a/model/stylegan/op/upfirdn2d.py
+++ b/model/stylegan/op/upfirdn2d.py
@@ -7,14 +7,6 @@ from torch.autograd import Function
 from torch.utils.cpp_extension import load


-module_path = os.path.dirname(__file__)
-upfirdn2d_op = load(
-    "upfirdn2d",
-    sources=[
-        os.path.join(module_path, "upfirdn2d.cpp"),
-        os.path.join(module_path, "upfirdn2d_kernel.cu"),
-    ],
-)


 class UpFirDn2dBackward(Function):
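Note on this patch: it only deletes the module-level torch.utils.cpp_extension.load(...) calls, so the StyleGAN custom ops are not JIT-compiled when no CUDA toolchain is available. For context, dualstylegan.py above applies it before importing the submodule, and only on a CPU-only Space; the guard is restated here with comments for readability (no new behaviour):

    import os
    import shlex
    import subprocess

    import torch

    # Only patch when running on Hugging Face Spaces without a GPU; a local GPU
    # setup keeps the compiled CUDA ops.
    if os.getenv('SYSTEM') == 'spaces' and not torch.cuda.is_available():
        with open('patch') as f:
            # Equivalent to running `patch -p1 < patch` inside the DualStyleGAN checkout.
            subprocess.run(shlex.split('patch -p1'), cwd='DualStyleGAN', stdin=f)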
requirements.txt
ADDED
@@ -0,0 +1,7 @@
dlib==19.24.2
numpy==1.23.5
opencv-python-headless==4.8.0.74
Pillow==9.5.0
scipy==1.10.1
torch==2.0.1
torchvision==0.15.2
style.css
ADDED
@@ -0,0 +1,17 @@
h1 {
  text-align: center;
}
img#overview {
  max-width: 800px;
  max-height: 600px;
  display: block;
  margin: auto;
}
img#style-image {
  max-width: 1000px;
  max-height: 600px;
}
img#visitor-badge {
  display: block;
  margin: auto;
}