update
- .cache/GeneratorV2_ffhq_Arcane_210624_e350.pt +3 -0
- .cache/GeneratorV2_gldv2_Hayao.pt +3 -0
- .cache/GeneratorV2_gldv2_Shinkai.pt +3 -0
- .cache/GeneratorV2_train_photo_Hayao.pt +3 -0
- app.py +13 -21
- example/face/anne.jpg +0 -0
- example/face/boy2.jpg +0 -0
- example/face/cap.jpg +0 -0
- example/face/dune2.jpg +0 -0
- example/face/elon.jpg +0 -0
- example/face/girl.jpg +0 -0
- example/face/girl4.jpg +0 -0
- example/face/girl6.jpg +0 -0
- example/face/leo.jpg +0 -0
- example/face/man2.jpg +0 -0
- example/face/nat_.jpg +0 -0
- example/face/seydoux.jpg +0 -0
- example/face/tobey.jpg +0 -0
- example/landscape/pexels-arnie-chou-304906-1004122.jpg +0 -0
- example/landscape/pexels-camilacarneiro-6318793.jpg +0 -0
- example/landscape/pexels-jimmy-teoh-294331-951531.jpg +0 -0
- example/landscape/pexels-nandhukumar-450441.jpg +0 -0
- inference.py +7 -0
- output/out.jpg +0 -0
.cache/GeneratorV2_ffhq_Arcane_210624_e350.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:697e85aed0228a507ebcd834c393033598ae6ce3d855a4d7d9aa7cab5cf8319e
+size 8608740
.cache/GeneratorV2_gldv2_Hayao.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24cda17c176fb9c052471770228d92aee16e1c36a9ca98e7b5d586dd2d99169d
+size 8607756
.cache/GeneratorV2_gldv2_Shinkai.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3e74a1dc12a8050f52a4f11b526c0847b7bd6612d0adbdbed0b4713a75140c1
+size 8607920
.cache/GeneratorV2_train_photo_Hayao.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e2485770ca36d073bc2b4992b8e58501be888559919e64d61c113ba6e6d5734
+size 8605752
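The four new `.pt` entries above are Git LFS pointer files rather than the binary weights themselves: each records the pointer-spec version, the SHA-256 of the real file, and its size in bytes. Purely as an illustration (the helper name and paths below are hypothetical, not repo code), a downloaded weight can be checked against such a pointer like this:

```python
import hashlib
import os


def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Hypothetical helper (not part of the repo): check a downloaded file
    against the oid/size recorded in a Git LFS pointer file."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    if os.path.getsize(blob_path) != expected_size:
        return False

    sha256 = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha256.update(chunk)
    return sha256.hexdigest() == expected_oid
```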
app.py
CHANGED
@@ -12,18 +12,18 @@ def inference(
     image: np.ndarray,
     style,
     imgsz=None,
+    retain_color=False,
 ):
     if imgsz is not None:
         imgsz = int(imgsz)
 
-    retain_color =
+    retain_color = retain_color
 
     weight = {
-        "AnimeGAN_Hayao": "hayao",
-        "AnimeGAN_Shinkai": "shinkai",
         "AnimeGANv2_Hayao": "hayao:v2",
         "AnimeGANv2_Shinkai": "shinkai:v2",
         "AnimeGANv2_Arcane": "arcane:v2",
+        "AnimeGANv2_Test": "GeneratorV2_train_photo_Hayao.pt",
     }[style]
     predictor = Predictor(
         weight,
@@ -39,29 +39,23 @@ gr.Interface(
     return anime_image, save_path
 
 
-title = "
-description = r"""
-article = r"""
-[](https://github.com/ptran1203/pytorch-animeGAN)
-### 🗻 Demo
-
-"""
+title = "图片动漫风格转换"
+description = r"""将图片转换成动漫风格"""
 
 gr.Interface(
     fn=inference,
     inputs=[
-        gr.components.Image(label="
+        gr.components.Image(label="输入图片"),
         gr.Dropdown(
             [
-                'AnimeGAN_Hayao',
-                'AnimeGAN_Shinkai',
                 'AnimeGANv2_Hayao',
                 'AnimeGANv2_Shinkai',
                 'AnimeGANv2_Arcane',
+                'AnimeGANv2_Test',
             ],
             type="value",
             value='AnimeGANv2_Hayao',
-            label='
+            label='转换风格'
         ),
         gr.Dropdown(
             [
@@ -74,24 +68,22 @@ gr.Interface(
             ],
             type="value",
             value=None,
-            label='
-        )
+            label='图片大小'
+        ),
+        gr.Checkbox(value=False, label="保留原图颜色"),
     ],
     outputs=[
-        gr.components.Image(type="numpy", label="
-        gr.components.File(label="
+        gr.components.Image(type="numpy", label="转换后图片"),
+        gr.components.File(label="下载转换图片")
     ],
     title=title,
     description=description,
-    article=article,
     allow_flagging="never",
     examples=[
         ['example/face/girl4.jpg', 'AnimeGANv2_Arcane', None],
         ['example/face/leo.jpg', 'AnimeGANv2_Arcane', None],
         ['example/face/cap.jpg', 'AnimeGANv2_Arcane', None],
         ['example/face/anne.jpg', 'AnimeGANv2_Arcane', None],
-        # ['example/boy2.jpg', 'AnimeGANv3_Arcane', "No"],
-        # ['example/cap.jpg', 'AnimeGANv3_Arcane', "No"],
         ['example/landscape/pexels-camilacarneiro-6318793.jpg', 'AnimeGANv2_Hayao', None],
         ['example/landscape/pexels-nandhukumar-450441.jpg', 'AnimeGANv2_Hayao', None],
     ]
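Taken together, the `app.py` change removes the v1 styles and the old English header block, switches the UI strings to Chinese (title 图片动漫风格转换 "image-to-anime style conversion", 输入图片 "input image", 转换风格 "conversion style", 图片大小 "image size", 保留原图颜色 "keep original colors", 转换后图片 "converted image", 下载转换图片 "download converted image"), adds an `AnimeGANv2_Test` entry that maps to a local weight filename, and wires a new `retain_color` checkbox into `inference`. Below is a minimal, self-contained sketch of the updated interface; it substitutes a hypothetical pass-through stub for the repo's `Predictor`, uses placeholder values for the size dropdown (its real choices fall outside the shown hunk), and assumes a Gradio version that still accepts `allow_flagging="never"`.

```python
# Sketch only: mirrors the updated app.py wiring, with a stub in place of Predictor.
import gradio as gr
import numpy as np


def inference(image: np.ndarray, style, imgsz=None, retain_color=False):
    # Stub: the real app resolves `style` to a weight and runs Predictor here.
    if imgsz is not None:
        imgsz = int(imgsz)
    return image, None  # pass-through instead of a stylized image / saved file


demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.components.Image(label="输入图片"),
        gr.Dropdown(
            ['AnimeGANv2_Hayao', 'AnimeGANv2_Shinkai', 'AnimeGANv2_Arcane', 'AnimeGANv2_Test'],
            type="value", value='AnimeGANv2_Hayao', label='转换风格'),
        gr.Dropdown([None, 256, 512], type="value", value=None, label='图片大小'),  # placeholder choices
        gr.Checkbox(value=False, label="保留原图颜色"),
    ],
    outputs=[
        gr.components.Image(type="numpy", label="转换后图片"),
        gr.components.File(label="下载转换图片"),
    ],
    title="图片动漫风格转换",
    description=r"""将图片转换成动漫风格""",
    allow_flagging="never",
)

if __name__ == "__main__":
    demo.launch()
```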
example/face/anne.jpg
ADDED
example/face/boy2.jpg
ADDED
example/face/cap.jpg
ADDED
example/face/dune2.jpg
ADDED
example/face/elon.jpg
ADDED
example/face/girl.jpg
ADDED
example/face/girl4.jpg
ADDED
example/face/girl6.jpg
ADDED
example/face/leo.jpg
ADDED
example/face/man2.jpg
ADDED
example/face/nat_.jpg
ADDED
example/face/seydoux.jpg
ADDED
example/face/tobey.jpg
ADDED
example/landscape/pexels-arnie-chou-304906-1004122.jpg
ADDED
example/landscape/pexels-camilacarneiro-6318793.jpg
ADDED
example/landscape/pexels-jimmy-teoh-294331-951531.jpg
ADDED
example/landscape/pexels-nandhukumar-450441.jpg
ADDED
inference.py
CHANGED
@@ -41,7 +41,14 @@ def profile(func):
 
 def auto_load_weight(weight, version=None, map_location=None):
     """Auto load Generator version from weight."""
+    cache_dir = os.path.expanduser(".cache")
     weight_name = os.path.basename(weight).lower()
+    cached_weight = os.path.join(cache_dir, weight_name)
+
+    # Check if the cached weight file exists
+    if os.path.exists(cached_weight):
+        weight = cached_weight
+
     if version is not None:
         version = version.lower()
         assert version in {"v1", "v2", "v3"}, f"Version {version} does not exist"
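The new logic runs before any version detection: `auto_load_weight` now looks for a file with the same (lower-cased) basename under a local `.cache/` directory and, if one exists, loads that copy instead of the path it was given. Note that `os.path.expanduser(".cache")` only expands a leading `~`, so the directory resolves relative to the working directory. A standalone restatement of the lookup, using a hypothetical helper name rather than repo code:

```python
import os


def resolve_cached_weight(weight: str, cache_dir: str = ".cache") -> str:
    """Hypothetical restatement of the lookup added to auto_load_weight:
    if .cache/<lower-cased basename> exists, use that cached copy instead."""
    cache_dir = os.path.expanduser(cache_dir)       # no leading "~", so the path stays relative to the CWD
    weight_name = os.path.basename(weight).lower()  # basename is lower-cased before the lookup
    cached_weight = os.path.join(cache_dir, weight_name)
    return cached_weight if os.path.exists(cached_weight) else weight


# e.g. the app.py mapping "AnimeGANv2_Test" -> "GeneratorV2_train_photo_Hayao.pt"
# looks for ".cache/generatorv2_train_photo_hayao.pt"; because of the .lower(),
# that matches the mixed-case files committed above only on a case-insensitive filesystem.
print(resolve_cached_weight("GeneratorV2_train_photo_Hayao.pt"))
```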
output/out.jpg
ADDED