Spaces: Running on Zero

Artiprocher committed
Commit • 812f04c
Parent(s): 2927c6c
Update app.py

app.py CHANGED
@@ -6,17 +6,6 @@ login(token=os.getenv('HF_AK'))
 from diffsynth import download_models
 download_models(["Kolors", "FLUX.1-dev"], downloading_priority=["HuggingFace", "ModelScope"])
 
-def get_file_list(path):
-    file_list = []
-    for filename in os.listdir(path):
-        file_path = os.path.join(path, filename)
-        if os.path.isdir(file_path):
-            file_list.extend(get_file_list(file_path))
-        else:
-            file_list.append(file_path)
-    return file_list
-print([i for i in get_file_list("models") if "cache" not in i])
-
 import gradio as gr
 from diffsynth import ModelManager, SDImagePipeline, SDXLImagePipeline, SD3ImagePipeline, HunyuanDiTImagePipeline, FluxImagePipeline
 import os, torch
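The deleted block above was a debugging helper that recursively listed every file under `models/` and printed the non-cache entries at startup. For reference only, and not part of this commit, an equivalent standard-library sketch using `os.walk` would be:

```python
# Reference sketch, not part of the commit: an os.walk equivalent of the
# deleted get_file_list helper.
import os

def get_file_list(path):
    # Collect every file path below `path`, recursing into subdirectories.
    return [
        os.path.join(root, name)
        for root, _, names in os.walk(path)
        for name in names
    ]

if __name__ == "__main__":
    # Mirrors the deleted debug print, which skipped cache entries.
    print([p for p in get_file_list("models") if "cache" not in p])
```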
@@ -141,6 +130,12 @@ def load_model(model_type, model_path):
     return model_manager, pipe
 
 
+@spaces.GPU(duration=60)
+def infer(pipe, input_params, seed):
+    torch.manual_seed(seed)
+    return pipe(**input_params)
+
+
 model_dict = {}
 
 with gr.Blocks() as app:
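The additions above move the GPU-bound work into a small top-level `infer` helper, so the ZeroGPU allocation (capped here at 60 seconds per call) covers only seeding and sampling rather than the whole Gradio callback; model loading stays outside the decorated scope. A minimal, self-contained sketch of this pattern, using a stand-in pipeline instead of the DiffSynth one:

```python
# Minimal ZeroGPU sketch (assumes a Space with the `spaces` package installed;
# fake_pipe is a stand-in so the example runs without any model weights).
import gradio as gr
import numpy as np
import spaces
import torch

def fake_pipe(prompt: str, seed: int) -> np.ndarray:
    # Stand-in "pipeline": returns a random image so the sketch is runnable.
    torch.manual_seed(seed)
    return (torch.rand(256, 256, 3) * 255).to(torch.uint8).numpy()

@spaces.GPU(duration=60)  # the GPU is attached only while this function runs
def infer(prompt, seed):
    # Keep everything GPU-bound inside the decorated scope.
    return fake_pipe(prompt, int(seed))

demo = gr.Interface(infer, inputs=["text", "number"], outputs="image")

if __name__ == "__main__":
    demo.launch()
```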
@@ -222,7 +217,6 @@ with gr.Blocks() as app:
         outputs=[output_image],
         triggers=run_button.click
     )
-    @spaces.GPU(duration=60)
     def generate_image(model_type, model_path, prompt, negative_prompt, cfg_scale, embedded_guidance, num_inference_steps, height, width, seed, *args, progress=gr.Progress()):
         _, pipe = load_model(model_type, model_path)
         input_params = {
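Only the stacked `@spaces.GPU` decorator is removed here; `generate_image` remains registered against `run_button.click`, and the decorator form of `gr.on` is visible as context at the end of the next hunk. For readers unfamiliar with that event wiring, a minimal standalone sketch (component names are illustrative, not taken from the Space):

```python
# Minimal sketch of the @gr.on decorator wiring pattern used in app.py.
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run_button = gr.Button("Run")
    output = gr.Textbox(label="Output")

    # Bind the function to the button's click event; inputs/outputs map to
    # the components above.
    @gr.on(inputs=[prompt], outputs=[output], triggers=run_button.click)
    def echo(text):
        return f"Prompt received: {text}"

if __name__ == "__main__":
    demo.launch()
```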
@@ -255,8 +249,7 @@ with gr.Blocks() as app:
             "masks": masks,
             "mask_scales": mask_scales,
         })
-
-        image = pipe(**input_params)
+        image = infer(pipe, input_params, seed)
         return image
 
     @gr.on(inputs=[output_image] + canvas_list, outputs=canvas_list, triggers=output_to_painter_button.click)
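Because the seed is now passed into `infer`, which calls `torch.manual_seed` before sampling, a fixed seed value reproduces the same generation. A quick standalone illustration of that guarantee:

```python
# Standalone check: re-seeding the global PyTorch RNG reproduces the same
# random draws, which is what makes a fixed `seed` input repeatable.
import torch

torch.manual_seed(42)
first = torch.randn(4)

torch.manual_seed(42)
second = torch.randn(4)

assert torch.equal(first, second)
print(first, second)
```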