Spaces:
Running
Running
Upload 5 files
Browse files
README.md
CHANGED
@@ -1,10 +1,10 @@
|
|
1 |
---
|
2 |
-
title: Free Multi Models Text-to-Image Demo
|
3 |
emoji: ππ
|
4 |
colorFrom: blue
|
5 |
colorTo: purple
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 4.
|
8 |
app_file: app.py
|
9 |
short_description: Text-to-Image
|
10 |
pinned: true
|
|
|
1 |
---
|
2 |
+
title: Free Multi Models Text-to-Image Demo V2
|
3 |
emoji: ππ
|
4 |
colorFrom: blue
|
5 |
colorTo: purple
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 4.40.0
|
8 |
app_file: app.py
|
9 |
short_description: Text-to-Image
|
10 |
pinned: true
|
app.py
CHANGED
@@ -1,104 +1,94 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from model import models
|
3 |
-
from multit2i import (
|
4 |
-
load_models,
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
"""
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
[
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
)
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
gr.
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
show_progress="full",
|
96 |
-
show_api=True,
|
97 |
-
).then(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
|
98 |
-
clear_prompt.click(lambda: None, None, [prompt], queue=False, show_api=False)
|
99 |
-
clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
|
100 |
-
recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
|
101 |
-
[positive_prefix, positive_suffix, negative_prefix, negative_suffix], queue=False, show_api=False)
|
102 |
-
|
103 |
-
demo.queue()
|
104 |
-
demo.launch()
|
|
|
1 |
+
import gradio as gr
from model import models
from multit2i import (
    load_models, infer_fn, infer_rand_fn, save_gallery,
    change_model, warm_model, get_model_info_md, loaded_models,
    get_positive_prefix, get_positive_suffix, get_negative_prefix, get_negative_suffix,
    get_recom_prompt_type, set_recom_prompt_preset, get_tag_type,
)

# Maximum number of simultaneous image slots shown in the UI.
max_images = 8
load_models(models)

# Fixed: the original rules used `prop=value` pairs and a dangling `!important;`,
# which is invalid CSS (the browser silently ignored the whole declaration).
css = """
.model_info { text-align: center; }
.output { width: 112px !important; height: 112px !important; }
.gallery { width: 100% !important; min-height: 768px !important; }
"""

with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
    with gr.Column():
        with gr.Group():
            model_name = gr.Dropdown(label="Select Model", choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
            model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_classes="model_info")
        with gr.Group():
            # NOTE(review): emoji restored from mojibake in the scraped source
            # ("ποΈ" -> 🗑️, "π²" -> 🎲) — confirm against the original file.
            clear_prompt = gr.Button(value="Clear Prompt 🗑️", size="sm", scale=1)
            prompt = gr.Text(label="Prompt", lines=2, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
            neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="", visible=False)
            with gr.Accordion("Recommended Prompt", open=False):
                recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
                with gr.Row():
                    positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
                    positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
                    negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[], visible=False)
                    negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"], visible=False)
            image_num = gr.Slider(label="Number of images", minimum=1, maximum=max_images, value=1, step=1, interactive=True, scale=1)
        with gr.Row():
            run_button = gr.Button("Generate Image", scale=6)
            random_button = gr.Button("Random Model 🎲", scale=3)
            stop_button = gr.Button('Stop', interactive=False, scale=1)
        with gr.Column():
            with gr.Group():
                with gr.Row():
                    # format="png": the original ".png" is not a valid format name
                    # (the Gallery below already uses "png").
                    output = [gr.Image(label='', elem_classes="output", type="filepath", format="png",
                                       show_download_button=True, show_share_button=False, show_label=False,
                                       interactive=False, min_width=80, visible=True) for _ in range(max_images)]
            with gr.Group():
                results = gr.Gallery(label="Gallery", elem_classes="gallery", interactive=False, show_download_button=True, show_share_button=False,
                                     container=True, format="png", object_fit="cover", columns=2, rows=2)
                image_files = gr.Files(label="Download", interactive=False)
            clear_results = gr.Button("Clear Gallery / Download 🗑️")
        with gr.Column():
            examples = gr.Examples(
                examples = [
                    ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
                    ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
                    ["kafuu chino, 1girl, solo"],
                    ["1girl"],
                    ["beautiful sunset"],
                ],
                inputs=[prompt],
            )
        gr.Markdown(
            f"""This demo was created in reference to the following demos.<br>
[Nymbo/Flood](https://huggingface.co/spaces/Nymbo/Flood),
[Yntec/ToyWorldXL](https://huggingface.co/spaces/Yntec/ToyWorldXL),
[Yntec/Diffusion80XX](https://huggingface.co/spaces/Yntec/Diffusion80XX).
"""
        )
        gr.DuplicateButton(value="Duplicate Space")

    # Enable Stop whenever a generation is kicked off.
    gr.on(triggers=[run_button.click, prompt.submit, random_button.click], fn=lambda: gr.update(interactive=True), inputs=None, outputs=stop_button, show_api=False)
    model_name.change(change_model, [model_name], [model_info], queue=False, show_api=False)\
        .success(warm_model, [model_name], None, queue=True, show_api=False)
    # Collect EVERY generation event so Stop cancels all of them; the original
    # rebound gen_event/gen_event2 each iteration and only cancelled the last pair.
    gen_events = []
    for i, o in enumerate(output):
        img_i = gr.Number(i, visible=False)  # this slot's index, passed to the per-slot handlers
        image_num.change(lambda i, n: gr.update(visible = (i < n)), [img_i, image_num], o, show_api=False)
        gen_events.append(gr.on(triggers=[run_button.click, prompt.submit],
                                fn=lambda i, n, m, t1, t2, l1, l2, l3, l4: infer_fn(m, t1, t2, l1, l2, l3, l4) if (i < n) else None,
                                inputs=[img_i, image_num, model_name, prompt, neg_prompt, positive_prefix, positive_suffix, negative_prefix, negative_suffix],
                                outputs=[o], queue=True, show_api=True))
        gen_events.append(gr.on(triggers=[random_button.click],
                                fn=lambda i, n, m, t1, t2, l1, l2, l3, l4: infer_rand_fn(m, t1, t2, l1, l2, l3, l4) if (i < n) else None,
                                inputs=[img_i, image_num, model_name, prompt, neg_prompt, positive_prefix, positive_suffix, negative_prefix, negative_suffix],
                                outputs=[o], queue=True, show_api=True))
        o.change(save_gallery, [o, results], [results, image_files], show_api=False)
    stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=gen_events, show_api=False)

    clear_prompt.click(lambda: None, None, [prompt], queue=False, show_api=False)
    clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
    recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
                               [positive_prefix, positive_suffix, negative_prefix, negative_suffix], queue=False, show_api=False)

demo.queue()
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
model.py
CHANGED
@@ -9,6 +9,7 @@ models = [
|
|
9 |
'votepurchase/ponyDiffusionV6XL',
|
10 |
'eienmojiki/Anything-XL',
|
11 |
'eienmojiki/Starry-XL-v5.2',
|
|
|
12 |
'digiplay/majicMIX_sombre_v2',
|
13 |
'digiplay/majicMIX_realistic_v7',
|
14 |
'votepurchase/counterfeitV30_v30',
|
|
|
9 |
'votepurchase/ponyDiffusionV6XL',
|
10 |
'eienmojiki/Anything-XL',
|
11 |
'eienmojiki/Starry-XL-v5.2',
|
12 |
+
"digiplay/MilkyWonderland_v1",
|
13 |
'digiplay/majicMIX_sombre_v2',
|
14 |
'digiplay/majicMIX_realistic_v7',
|
15 |
'votepurchase/counterfeitV30_v30',
|
multit2i.py
CHANGED
@@ -80,31 +80,32 @@ def get_t2i_model_info_dict(repo_id: str):
|
|
80 |
return info
|
81 |
|
82 |
|
83 |
-
def
|
|
|
84 |
from datetime import datetime, timezone, timedelta
|
85 |
-
|
86 |
dt_now = datetime.now(timezone(timedelta(hours=9)))
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
return
|
108 |
|
109 |
|
110 |
# https://github.com/gradio-app/gradio/blob/main/gradio/external.py
|
@@ -124,7 +125,7 @@ def load_from_model(model_name: str, hf_token: str = None):
|
|
124 |
f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
|
125 |
)
|
126 |
headers["X-Wait-For-Model"] = "true"
|
127 |
-
client = huggingface_hub.InferenceClient(model=model_name, headers=headers, token=hf_token, timeout=
|
128 |
inputs = gr.components.Textbox(label="Input")
|
129 |
outputs = gr.components.Image(label="Output")
|
130 |
fn = client.text_to_image
|
@@ -163,28 +164,9 @@ def load_model(model_name: str):
|
|
163 |
return loaded_models[model_name]
|
164 |
|
165 |
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
async with sem:
|
170 |
-
try:
|
171 |
-
await asyncio.sleep(0.5)
|
172 |
-
return await asyncio.to_thread(load_model, model)
|
173 |
-
except Exception as e:
|
174 |
-
print(e)
|
175 |
-
tasks = [asyncio.create_task(async_load_model(model)) for model in models]
|
176 |
-
return await asyncio.gather(*tasks, return_exceptions=True)
|
177 |
-
|
178 |
-
|
179 |
-
def load_models(models: list, limit: int=5):
|
180 |
-
loop = asyncio.new_event_loop()
|
181 |
-
try:
|
182 |
-
loop.run_until_complete(async_load_models(models, limit))
|
183 |
-
except Exception as e:
|
184 |
-
print(e)
|
185 |
-
pass
|
186 |
-
finally:
|
187 |
-
loop.close()
|
188 |
|
189 |
|
190 |
positive_prefix = {
|
@@ -298,72 +280,73 @@ def change_model(model_name: str):
|
|
298 |
return get_model_info_md(model_name)
|
299 |
|
300 |
|
301 |
-
def
|
302 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
303 |
import random
|
304 |
-
|
305 |
rand = random.randint(1, 500)
|
306 |
for i in range(rand):
|
307 |
-
|
308 |
-
|
|
|
|
|
|
|
309 |
try:
|
310 |
-
|
311 |
-
|
312 |
-
image_path = model(prompt + seed, neg_prompt)
|
313 |
-
image = Image.open(image_path).convert('RGBA')
|
314 |
-
except Exception as e:
|
315 |
print(e)
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
async def infer_multi(prompt: str, neg_prompt: str, results: list, image_num: float, model_name: str,
|
321 |
-
pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], progress=gr.Progress(track_tqdm=True)):
|
322 |
-
import asyncio
|
323 |
-
progress(0, desc="Start inference.")
|
324 |
-
image_num = int(image_num)
|
325 |
-
images = results if results else []
|
326 |
-
image_num_offset = len(images)
|
327 |
-
prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
|
328 |
-
tasks = [asyncio.create_task(asyncio.to_thread(infer, prompt, neg_prompt, model_name)) for i in range(image_num)]
|
329 |
-
await asyncio.sleep(0)
|
330 |
-
for task in tasks:
|
331 |
-
progress(float(len(images) - image_num_offset) / float(image_num), desc="Running inference.")
|
332 |
-
try:
|
333 |
-
result = await asyncio.wait_for(task, timeout=120)
|
334 |
-
except (Exception, asyncio.TimeoutError) as e:
|
335 |
-
print(e)
|
336 |
-
if not task.done(): task.cancel()
|
337 |
-
result = None
|
338 |
-
image_num_offset += 1
|
339 |
with lock:
|
340 |
-
|
341 |
-
|
342 |
-
|
|
|
343 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
344 |
|
345 |
-
|
346 |
-
|
|
|
347 |
import random
|
348 |
-
|
349 |
-
|
350 |
-
images = results if results else []
|
351 |
-
image_num_offset = len(images)
|
352 |
random.seed()
|
353 |
-
|
354 |
-
|
355 |
-
|
356 |
-
|
357 |
-
|
358 |
-
|
359 |
-
|
360 |
-
|
361 |
-
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
image_num_offset += 1
|
366 |
-
with lock:
|
367 |
-
if result and len(result) == 2 and result[1]: images.append(result)
|
368 |
-
await asyncio.sleep(0)
|
369 |
-
yield images
|
|
|
80 |
return info
|
81 |
|
82 |
|
83 |
+
def rename_image(image_path: str | None, model_name: str):
|
84 |
+
from PIL import Image
|
85 |
from datetime import datetime, timezone, timedelta
|
86 |
+
if image_path is None: return None
|
87 |
dt_now = datetime.now(timezone(timedelta(hours=9)))
|
88 |
+
filename = f"{model_name.split('/')[-1]}_{dt_now.strftime('%Y%m%d_%H%M%S')}.png"
|
89 |
+
try:
|
90 |
+
if Path(image_path).exists():
|
91 |
+
png_path = "image.png"
|
92 |
+
Image.open(image_path).convert('RGBA').save(png_path, "PNG")
|
93 |
+
new_path = str(Path(png_path).resolve().rename(Path(filename).resolve()))
|
94 |
+
return new_path
|
95 |
+
else:
|
96 |
+
return None
|
97 |
+
except Exception as e:
|
98 |
+
print(e)
|
99 |
+
return None
|
100 |
+
|
101 |
+
|
102 |
+
def save_gallery(image_path: str | None, images: list[tuple] | None):
|
103 |
+
if images is None: images = []
|
104 |
+
files = [i[0] for i in images]
|
105 |
+
if image_path is None: return images, files
|
106 |
+
files.insert(0, str(image_path))
|
107 |
+
images.insert(0, (str(image_path), Path(image_path).stem))
|
108 |
+
return images, files
|
109 |
|
110 |
|
111 |
# https://github.com/gradio-app/gradio/blob/main/gradio/external.py
|
|
|
125 |
f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
|
126 |
)
|
127 |
headers["X-Wait-For-Model"] = "true"
|
128 |
+
client = huggingface_hub.InferenceClient(model=model_name, headers=headers, token=hf_token, timeout=600)
|
129 |
inputs = gr.components.Textbox(label="Input")
|
130 |
outputs = gr.components.Image(label="Output")
|
131 |
fn = client.text_to_image
|
|
|
164 |
return loaded_models[model_name]
|
165 |
|
166 |
|
167 |
+
def load_models(models: list):
    """Eagerly load every entry of *models* via load_model (which caches)."""
    for repo in models:
        load_model(repo)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
170 |
|
171 |
|
172 |
positive_prefix = {
|
|
|
280 |
return get_model_info_md(model_name)
|
281 |
|
282 |
|
283 |
+
def warm_model(model_name: str):
    """Send a throwaway request to *model_name* so the remote endpoint spins up."""
    model = load_model(model_name)
    if not model:
        return
    try:
        print(f"Warming model: {model_name}")
        model(" ")  # minimal prompt; we only care that the endpoint wakes
    except Exception as e:
        print(e)
|
291 |
+
|
292 |
+
|
293 |
+
async def infer(model_name: str, prompt: str, neg_prompt: str, timeout: float):
    """Run one text-to-image inference on *model_name* with a hard *timeout*.

    Returns the renamed image path on success, or None on failure/timeout.
    NOTE(review): neg_prompt is accepted but not forwarded to the model call
    in the original — confirm whether the serverless wrapper supports it.
    """
    import random
    model = load_model(model_name)
    if not model: return None
    # Random-width whitespace "noise" appended to the prompt busts the
    # inference-API cache so repeated prompts still yield fresh images.
    # (Was a quadratic `noise += " "` loop; `" " * n` is equivalent.)
    noise = " " * random.randint(1, 500)
    task = asyncio.create_task(asyncio.to_thread(model, f'{prompt} {noise}'))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {model_name}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            image = rename_image(result, model_name)
        return image
    return None
|
315 |
+
|
316 |
|
317 |
+
infer_timeout = 300  # seconds allowed for a single inference


def infer_fn(model_name: str, prompt: str, neg_prompt: str,
             pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = []):
    """Synchronous Gradio entry point: run infer() for *model_name* on its own event loop.

    Returns the generated image path or None ('NA' model, error, or timeout).
    The list defaults are never mutated here, so the shared-default pitfall
    does not bite; kept as-is for interface compatibility.
    """
    if model_name == 'NA':
        return None
    # Create the loop BEFORE the try block: in the original, a failure inside
    # recom_prompt() left `loop` unbound and `finally: loop.close()` raised
    # NameError, masking the real error.
    loop = asyncio.new_event_loop()
    try:
        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
        result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, infer_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_name}")
        result = None
    finally:
        loop.close()
    return result
|
333 |
|
334 |
+
|
335 |
+
def infer_rand_fn(model_name_dummy: str, prompt: str, neg_prompt: str,
                  pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = []):
    """Like infer_fn, but picks a random model from loaded_models.

    *model_name_dummy* is only checked for the 'NA' sentinel; the actual model
    is chosen at random. Returns the image path or None.
    """
    import random
    if model_name_dummy == 'NA':
        return None
    random.seed()
    model_name = random.choice(list(loaded_models.keys()))
    # Loop created BEFORE the try block — fixes the unbound-`loop` NameError
    # in `finally` when recom_prompt() raised (same bug as infer_fn).
    loop = asyncio.new_event_loop()
    try:
        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
        result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, infer_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_name}")
        result = None
    finally:
        loop.close()
    return result
|
|
|
|
|
|
|
|
|
|