Update app.py
app.py (CHANGED)
@@ -3,8 +3,7 @@ import gradio as gr
 import numpy as np
 import spaces
 import torch
-import
-from huggingface_hub import hf_hub_download
+import re
 from diffusers import DiffusionPipeline
 from compel import Compel, ReturnedEmbeddingsType
 from PIL import Image, PngImagePlugin
@@ -34,7 +33,18 @@ def add_comma_after_pattern_ti(text):
     pattern = re.compile(r'\b\w+_\d+\b')
     modified_text = pattern.sub(lambda x: x.group() + ',', text)
     return modified_text
-
+
+def get_embed_new(prompt, pipe, compel, only_convert_string=False):
+    """处理提示词的函数"""
+    if only_convert_string:
+        # 简单处理,添加逗号分隔
+        return add_comma_after_pattern_ti(prompt)
+    else:
+        # 如果需要更复杂的处理,可以在这里添加
+        return prompt
+
+DESCRIPTION = "梦羽的模型生成器 - 快速生成 MiaomiaoHarem vPred Dogma 1.1 模型的图片"
+
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>你现在运行在CPU上 但是此项目只支持GPU.</p>"
 
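For reference, the regex this hunk relies on appends a comma after textual-inversion-style tokens of the form `name_123`, which is what `get_embed_new` delegates to when `only_convert_string=True`. A minimal standalone sketch (the sample prompt is invented):

```python
import re

def add_comma_after_pattern_ti(text):
    # \b\w+_\d+\b matches tokens like "aki_99": word chars, underscore, digits
    pattern = re.compile(r'\b\w+_\d+\b')
    # Append a comma directly after each matched token
    return pattern.sub(lambda x: x.group() + ',', text)

print(add_comma_after_pattern_ti("1girl, aki_99 smiling"))
# -> 1girl, aki_99, smiling
```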
@@ -62,7 +72,6 @@ def infer(
     num_inference_steps: int = 30,
     randomize_seed: bool = True,
     use_resolution_binning: bool = True,
-    progress=gr.Progress(track_tqdm=True),
 ):
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
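The unchanged context lines show the usual reproducibility pattern: the (possibly randomized) seed is fed to a `torch.Generator`, so the same seed yields the same sampling trajectory. A tiny illustration:

```python
import torch

# A fixed seed gives identical pseudo-random draws on every run,
# which is what makes generation reproducible for a given seed value.
generator = torch.Generator().manual_seed(42)
print(torch.randn(2, generator=generator))  # same two numbers every run
```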
@@ -107,7 +116,7 @@ def infer(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-        "model": "
+        "model": "qwen-image",
         "use_resolution_binning": use_resolution_binning,
         "PreUrl": "https://huggingface.co/spaces/Menyu/miaomiaoHaremDogma11"
     }
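Given the `PngImagePlugin` import in the first hunk, a metadata dict like this is presumably written into the generated PNG. A hedged Pillow sketch of that technique; the `"parameters"` chunk key and the JSON serialization are assumptions, not taken from app.py:

```python
import json
from PIL import Image, PngImagePlugin

metadata = {"model": "qwen-image", "num_inference_steps": 28}

info = PngImagePlugin.PngInfo()
# The chunk key "parameters" is an assumption; app.py may use another name.
info.add_text("parameters", json.dumps(metadata))

img = Image.new("RGB", (64, 64))
img.save("out.png", pnginfo=info)
print(Image.open("out.png").info["parameters"])  # metadata round-trips
```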
@@ -132,7 +141,7 @@ h1{text-align:center}
 
 with gr.Blocks(css=css) as demo:
     gr.Markdown("""# 梦羽的模型生成器
-    ### 快速生成
+    ### 快速生成 qwen-image 模型的图片""")
     with gr.Group():
         with gr.Row():
             prompt = gr.Text(
@@ -193,6 +202,7 @@ with gr.Blocks(css=css) as demo:
                 step=1,
                 value=28,
             )
+            use_resolution_binning = gr.Checkbox(label="使用分辨率分箱", value=True)
 
         gr.Examples(
             examples=examples,
@@ -220,6 +230,7 @@ with gr.Blocks(css=css) as demo:
             guidance_scale,
             num_inference_steps,
             randomize_seed,
+            use_resolution_binning,
         ],
         outputs=[result, seed],
     )
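These two additions follow standard Gradio wiring: every component in a handler's `inputs` list is passed to the function as one positional argument, so the checkbox's boolean reaches the `use_resolution_binning` parameter of `infer`. A self-contained sketch of the same pattern, with simplified stand-ins for the components and handler in app.py:

```python
import gradio as gr

def infer(prompt: str, use_resolution_binning: bool = True) -> str:
    # Stand-in for the real infer(): just echoes what it received.
    return f"binning={use_resolution_binning}: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    use_resolution_binning = gr.Checkbox(label="使用分辨率分箱", value=True)
    run = gr.Button("Run")
    result = gr.Textbox(label="Result")
    # Each component in `inputs` becomes one positional argument of infer().
    run.click(fn=infer, inputs=[prompt, use_resolution_binning], outputs=result)

demo.launch()
```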