Upload 2 files
- app.py +17 -14
- multit2i.py +31 -14
app.py
CHANGED
@@ -23,22 +23,21 @@ load_models(models, 5)
 
 
 css = """
-#model_info { text-align: center;
+#model_info { text-align: center; }
 """
 
 with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
     with gr.Column():
-        with gr.Accordion("
-
-
-
-
-
-
-
-
-
-        model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_id="model_info")
+        with gr.Accordion("Recommended Prompt", open=False):
+            recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
+            with gr.Row():
+                positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
+                positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
+                negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[], visible=False)
+                negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"], visible=False)
+        with gr.Accordion("Model", open=True):
+            model_name = gr.Dropdown(label="Select Model", show_label=False, choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
+            model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_id="model_info")
     with gr.Group():
         clear_prompt = gr.Button(value="Clear Prompt 🗑️", size="sm", scale=1)
         prompt = gr.Text(label="Prompt", lines=1, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
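The CSS hunk closes a rule that was missing its `}`, and the column gains two accordions: prompt presets and model selection. A minimal runnable sketch of this Gradio layout pattern, with placeholder choices standing in for the app's get_recom_prompt_type() / get_positive_prefix() helpers:

import gradio as gr

# Placeholder data; the real app fills these from get_recom_prompt_type(),
# get_positive_prefix(), and friends.
PRESETS = ["None", "Common"]
MODELS = ["model-a", "model-b"]

with gr.Blocks() as demo:
    with gr.Column():
        with gr.Accordion("Recommended Prompt", open=False):
            preset = gr.Radio(label="Set Presets", choices=PRESETS, value="Common")
            with gr.Row():
                pos_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=PRESETS, value=[])
                pos_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=PRESETS, value=["Common"])
        with gr.Accordion("Model", open=True):
            model_name = gr.Dropdown(label="Select Model", choices=MODELS, value=MODELS[0], allow_custom_value=True)

demo.launch()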
@@ -77,9 +76,11 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
                 positive_prefix, positive_suffix, negative_prefix, negative_suffix],
         outputs=[results],
         queue=True,
+        trigger_mode="multiple",
+        concurrency_limit=5,
         show_progress="full",
         show_api=True,
-    ).
+    ).then(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
     gr.on(
         triggers=[random_button.click],
         fn=infer_multi_random,
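Both inference listeners get the same treatment: trigger_mode="multiple" queues clicks that arrive mid-run instead of dropping them, concurrency_limit=5 caps simultaneous executions, and the chained .then() saves the gallery once inference finishes. A condensed sketch of the gr.on(...).then(...) chaining pattern; generate and save_results are hypothetical stand-ins for the app's infer_multi and save_gallery_images:

import gradio as gr

def generate(prompt):                      # stand-in for infer_multi
    return [prompt.upper()]

def save_results(items):                   # stand-in for save_gallery_images
    return items, ", ".join(items)

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    run = gr.Button("Run")
    results = gr.JSON(label="Results")
    files = gr.Text(label="Saved files")
    gr.on(
        triggers=[run.click, prompt.submit],
        fn=generate,
        inputs=[prompt],
        outputs=[results],
        queue=True,
        trigger_mode="multiple",           # queue repeat triggers instead of dropping them
        concurrency_limit=5,               # at most five runs execute at once
    ).then(save_results, [results], [results, files], queue=False, show_api=False)

demo.launch()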
@@ -87,9 +88,11 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
                 positive_prefix, positive_suffix, negative_prefix, negative_suffix],
         outputs=[results],
         queue=True,
+        trigger_mode="multiple",
+        concurrency_limit=5,
         show_progress="full",
         show_api=True,
-    ).
+    ).then(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
     clear_prompt.click(lambda: None, None, [prompt], queue=False, show_api=False)
     clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
     recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
multit2i.py
CHANGED
@@ -107,6 +107,8 @@ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
     return gr.update(value=output_images), gr.update(value=output_paths)
 
 
+# https://github.com/gradio-app/gradio/blob/main/gradio/external.py
+# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
 def load_from_model(model_name: str, hf_token: str = None):
     import httpx
     import huggingface_hub
@@ -166,6 +168,7 @@ async def async_load_models(models: list, limit: int=5):
     async def async_load_model(model: str):
         async with sem:
             try:
+                await asyncio.sleep(0.5)
                 return await asyncio.to_thread(load_model, model)
             except Exception as e:
                 print(e)
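These two hunks add reference links for the external-model loader and a half-second pause inside the semaphore, spacing out Hub requests while up to `limit` models still load in parallel threads. A self-contained sketch of that throttled-loader pattern; load_model here is a dummy stand-in for the app's real loader:

import asyncio, time

def load_model(model: str):                      # dummy stand-in: pretend this hits the Hub
    time.sleep(0.2)
    return f"loaded {model}"

async def async_load_models(models: list, limit: int = 5):
    sem = asyncio.Semaphore(limit)               # at most `limit` loads in flight
    async def async_load_model(model: str):
        async with sem:
            try:
                await asyncio.sleep(0.5)         # the added pause: spaces out requests
                return await asyncio.to_thread(load_model, model)
            except Exception as e:
                print(e)
    return await asyncio.gather(*[async_load_model(m) for m in models])

print(asyncio.run(async_load_models([f"model-{i}" for i in range(6)], limit=3)))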
@@ -306,7 +309,7 @@ def infer(prompt: str, neg_prompt: str, model_name: str):
     try:
         model = load_model(model_name)
         if not model: return (Image.Image(), None)
-        image_path = model(prompt + seed)
+        image_path = model(prompt + seed, neg_prompt)
         image = Image.open(image_path).convert('RGBA')
     except Exception as e:
         print(e)
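The only change in infer() is forwarding neg_prompt as a second positional argument to the loaded model callable. For context, a condensed sketch of the fallback contract visible in this hunk: any failure returns an empty PIL image with a None path, which the callers below filter out by checking result[1]:

from PIL import Image

def infer_safe(model, prompt: str, neg_prompt: str):
    """Call a loaded model; fall back to an empty image on any failure."""
    try:
        image_path = model(prompt, neg_prompt)   # may raise or time out
        return (Image.open(image_path).convert('RGBA'), image_path)
    except Exception as e:
        print(e)
        return (Image.Image(), None)             # callers drop results with a None path

result = infer_safe(lambda p, n: 1 / 0, "1girl, solo", "lowres")
print(result[1] is None)                         # True: this result would be filtered out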
@@ -316,35 +319,49 @@ def infer(prompt: str, neg_prompt: str, model_name: str):
 
 async def infer_multi(prompt: str, neg_prompt: str, results: list, image_num: float, model_name: str,
                       pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], progress=gr.Progress(track_tqdm=True)):
-
+    import asyncio
+    progress(0, desc="Start inference.")
     image_num = int(image_num)
     images = results if results else []
+    image_num_offset = len(images)
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
     tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for i in range(image_num)]
-
-
-
-
+    for task in tasks:
+        progress(float(len(images) - image_num_offset) / float(image_num), desc="Running inference.")
+        try:
+            result = await task
+        except Exception as e:
+            print(e)
+            task.cancel()
+            result = None
+        image_num_offset += 1
         with lock:
-            if result and result[1]: images.append(result)
+            if result and len(result) == 2 and result[1]: images.append(result)
+        await asyncio.sleep(0.05)
         yield images
 
 
 async def infer_multi_random(prompt: str, neg_prompt: str, results: list, image_num: float,
                              pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], progress=gr.Progress(track_tqdm=True)):
-    from tqdm.asyncio import tqdm_asyncio
     import random
+    progress(0, desc="Start inference.")
     image_num = int(image_num)
     images = results if results else []
+    image_num_offset = len(images)
     random.seed()
     model_names = random.choices(list(loaded_models.keys()), k = image_num)
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
     tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for model_name in model_names]
-
-
-
-
+    for task in tasks:
+        progress(float(len(images) - image_num_offset) / float(image_num), desc="Running inference.")
+        try:
+            result = await task
+        except Exception as e:
+            print(e)
+            task.cancel()
+            result = None
+        image_num_offset += 1
         with lock:
-            if result and result[1]: images.append(result)
+            if result and len(result) == 2 and result[1]: images.append(result)
+        await asyncio.sleep(0.05)
         yield images
-
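This last hunk turns both generators into the same loop: update a progress bar, await each asyncio.to_thread(...) call, append only well-formed (image, path) pairs under the lock, and yield the growing list so Gradio streams gallery updates. A self-contained sketch of that incremental pattern, with a hypothetical render() standing in for infer(); asyncio.create_task is used here so the worker threads start immediately rather than one by one as each coroutine is awaited:

import asyncio, random, time

def render(prompt: str, model_name: str):          # hypothetical stand-in for infer()
    time.sleep(random.uniform(0.1, 0.3))           # simulate slow, blocking inference
    return (f"{model_name}:{prompt}", f"/tmp/{model_name}.png")

async def infer_multi(prompt: str, image_num: int, model_name: str):
    images = []
    tasks = [asyncio.create_task(asyncio.to_thread(render, prompt, model_name))
             for _ in range(image_num)]
    for task in tasks:
        try:
            result = await task                    # consume results in order
        except Exception as e:
            print(e)
            result = None
        if result and len(result) == 2 and result[1]:
            images.append(result)
        yield list(images)                         # stream the growing gallery

async def main():
    async for images in infer_multi("1girl, solo", 3, "model-a"):
        print(f"{len(images)} image(s) so far")

asyncio.run(main())

Wrapping the to_thread coroutines in tasks is the design choice that lets results stream out while the remaining workers keep running.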