import os
import asyncio
from datetime import datetime
from random import randint
from threading import RLock

import gradio as gr

from all_models import models
from externalmod import gr_Interface_load

# Serializes writes to the shared output file (see infer() below).
lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN")  # Only needed if private or gated models are used.
nb_models = 24
inference_timeout = 300
MAX_SEED = 2**32 - 1

def split_models(models, nb_models):
    """Split the model list into chunks of nb_models; a trailing chunk with a single model is dropped."""
    models_temp = []
    models_lis_temp = []
    i = 0
    for m in models:
        models_temp.append(m)
        i += 1
        if i % nb_models == 0:
            models_lis_temp.append(models_temp)
            models_temp = []
    if len(models_temp) > 1:
        models_lis_temp.append(models_temp)
    return models_lis_temp
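
# For illustration (names are placeholders):
# split_models(['m1', 'm2', 'm3', 'm4', 'm5'], 2) -> [['m1', 'm2'], ['m3', 'm4']]
# (the leftover single-model chunk ['m5'] is dropped by the len > 1 check).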

def split_models_axb(models, a, b):
    """Repeat each model a times and split into chunks of b entries; returns (chunks, a*b)."""
    models_temp = []
    models_lis_temp = []
    i = 0
    nb_models = b
    for m in models:
        for j in range(a):
            models_temp.append(m)
            i += 1
            if i % nb_models == 0:
                models_lis_temp.append(models_temp)
                models_temp = []
    if len(models_temp) > 1:
        models_lis_temp.append(models_temp)
    return models_lis_temp, a * b
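
# For illustration: split_models_axb(['m1', 'm2', 'm3'], 2, 2)
# -> ([['m1', 'm1'], ['m2', 'm2'], ['m3', 'm3']], 4)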

def split_models_8x3(models, nb_models):
    """Split into chunks of 8 models and triple each chunk; the nb_models argument is unused."""
    models_temp = []
    models_lis_temp = []
    i = 0
    nb_models_x3 = 8
    for m in models:
        models_temp.append(m)
        i += 1
        if i % nb_models_x3 == 0:
            models_lis_temp.append(models_temp + models_temp + models_temp)
            models_temp = []
    if len(models_temp) > 1:
        models_lis_temp.append(models_temp + models_temp + models_temp)
    return models_lis_temp
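
# For illustration: a chunk ['m1', ..., 'm8'] is emitted as the 24-entry group
# ['m1', ..., 'm8'] * 3.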
"""models_test=split_models_x3(models,nb_models)""" | |
"""models_test=split_models(models,nb_models)""" | |
models_test , nb_models =split_models_axb(models,2,20) | |
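# With a=2 and b=20, each group holds 20 entries (10 distinct models, each
# appearing twice) and nb_models is rebound to a*b = 40, so extend_choices()
# below pads every selection out to 40 slots.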

def get_current_time():
    """Return the current time as a 'YYYY-MM-DD HH:MM:SS' string."""
    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")

def load_fn_original(models):
    """Load each model via gr.load(); on failure, fall back to a no-op text-to-image Interface."""
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    default_models = models[:num_models] if num_models != 0 else []
    for model in models:
        if model not in models_load:
            try:
                m = gr.load(f'models/{model}')
            except Exception as error:
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
                print(error)
            models_load[model] = m

def load_fn(models):
    """Load each model via gr_Interface_load(), passing HF_TOKEN for private or gated models."""
    global models_load
    global num_models
    global default_models
    models_load = {}
    num_models = len(models)
    default_models = models[:num_models] if num_models != 0 else []
    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
                print(error)
            models_load[model] = m
"""models = models_test[1]""" | |
#load_fn_original | |
load_fn(models) | |
"""models = {} | |
load_fn(models)""" | |

def extend_choices(choices):
    # Pad the selection with 'NA' placeholders up to nb_models slots.
    return choices + (nb_models - len(choices)) * ['NA']
    # return choices + (num_models - len(choices)) * ['NA']

def extend_choices_b(choices):
    # Same padding, wrapped in hidden Textbox components for use as event inputs.
    choices_plus = extend_choices(choices)
    return [gr.Textbox(m, visible=False) for m in choices_plus]
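
# For illustration, with nb_models == 40:
# extend_choices(['m1']) -> ['m1'] + 39 * ['NA']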

def update_imgbox(choices):
    # One image slot per padded choice; 'NA' placeholders stay hidden.
    choices_plus = extend_choices(choices)
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]

def choice_group_a(group_model_choice):
    # Return the model group whose second entry matches the dropdown value;
    # fall back to an empty list so callers can still pad with 'NA'.
    for m in models_test:
        if group_model_choice == m[1]:
            print(m)
            return m
    return []

def choice_group_b(group_model_choice):
    choice = choice_group_a(group_model_choice)
    choice = extend_choices(choice)
    # return [gr.Image(label=m, min_width=170, height=170) for m in choice]
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choice]

def choice_group_c(group_model_choice):
    choice = choice_group_a(group_model_choice)
    choice = extend_choices(choice)
    return [gr.Textbox(m, visible=False) for m in choice]

def choice_group_d(var_Test):
    # Wires generate/stop events for a model group; only referenced from
    # commented-out code in make_me() below.
    (gen_button, stop_button, output, current_models, txt_input) = var_Test
    for m, o in zip(current_models, output):
        gen_event = gen_button.click(gen_fn, [m, txt_input], o)
        stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
    return gen_event

def test_pass(test):
    if test == os.getenv('p'):
        print("ok")
        return gr.Dropdown(label="test Model", show_label=False, choices=list(models_test), allow_custom_value=True)
    else:
        print("nop")
        return gr.Dropdown(label="test Model", show_label=False, choices=[], allow_custom_value=True)

# https://huggingface.co/docs/api-inference/detailed_parameters
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    from pathlib import Path
    kwargs = {}
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
    noise = ""
    if seed >= 0:
        kwargs["seed"] = seed
    else:
        # With a random seed, pad the prompt with a random amount of whitespace
        # so identical prompts are not served from the inference cache.
        noise = " " * randint(1, 500)
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    except Exception as e:
        print(e)
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
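
# Note: every successful call saves to the same "image.png" in the working
# directory; the RLock serializes the writes, but concurrent generations still
# share one path, so a result can be overwritten before Gradio reads it back.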

def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    # Synchronous wrapper around infer() so it can serve as a Gradio event handler.
    if model_str == 'NA':
        return None
    try:
        loop = asyncio.new_event_loop()
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result
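
# Example call outside the UI (the model name is whatever all_models provides):
# image_path = gen_fn(models[0], "a watercolor fox", seed=42)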

def gen_fn_original(model_str, prompt):
    if model_str == 'NA':
        return None
    noise = str(randint(0, 9999))
    try:
        m = models_load[model_str](f'{prompt} {noise}')
    except Exception as error:
        print("error : " + model_str)
        print(error)
        m = False
    return m
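
# The numeric noise suffix plays the same cache-busting role as the whitespace
# padding in infer() above.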

def make_me():
    # with gr.Tab('The Dream'):
    with gr.Row():
        # txt_input = gr.Textbox(lines=3, width=300, max_height=100)
        # txt_input = gr.Textbox(label='Your prompt:', lines=3, width=300, max_height=100)
        with gr.Column(scale=4):
            with gr.Group():
                txt_input = gr.Textbox(label='Your prompt:', lines=3)
                with gr.Accordion("Advanced", open=False, visible=True):
                    neg_input = gr.Textbox(label='Negative prompt:', lines=1)
                    with gr.Row():
                        width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
        # gen_button = gr.Button('Generate images', width=150, height=30)
        # stop_button = gr.Button('Stop', variant='secondary', interactive=False, width=150, height=30)
        gen_button = gr.Button('Generate images', scale=3)
        stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
        gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
    # gr.HTML("""
    # <div style="text-align: center; max-width: 100%; margin: 0 auto;">
    #   <body>
    #   </body>
    # </div>
    # """)
    with gr.Row():
        # output = [gr.Image(label=m, min_width=170, height=170) for m in default_models]
        # current_models = [gr.Textbox(m, visible=False) for m in default_models]
        # choices = [models_test[0][0]]
        choices = models_test[0]
        # output = [gr.Image(label=m, min_width=170, height=170) for m in choices]
        # current_models = [gr.Textbox(m, visible=False) for m in choices]
        # Start with a single visible slot; the dropdown below swaps in whole groups.
        output = update_imgbox([choices[0]])
        current_models = extend_choices_b([choices[0]])
        for m, o in zip(current_models, output):
            # gen_event = gen_button.click(gen_fn_original, [m, txt_input], o)
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
                              inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o])
            stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
"""with gr.Accordion('Model selection'): | |
model_choice = gr.CheckboxGroup(models, label=f' {num_models} different models selected', value=default_models, multiselect=True, max_choices=num_models, interactive=True, filterable=False) | |
model_choice.change(update_imgbox, (gen_button,stop_button,group_model_choice), output) | |
model_choice.change(extend_choices, model_choice, current_models) | |
""" | |
with gr.Accordion("test", open=True): | |
"""group_model_choice = gr.Dropdown(label="test Model", show_label=False, choices=list(models_test) , allow_custom_value=True)""" | |
group_model_choice = gr.Dropdown(label="test Model", show_label=False, choices=list([]) , allow_custom_value=True) | |
group_model_choice.change(choice_group_b,group_model_choice,output) | |
group_model_choice.change(choice_group_c,group_model_choice,current_models) | |
"""group_model_choice.change(choice_group_d,(gen_button,stop_button,output,current_models,txt_input),gen_event)""" | |
        with gr.Row():
            # txt_input_p = gr.Textbox(label='test', lines=1, width=300, max_height=100)
            txt_input_p = gr.Textbox(label='test', lines=1)
            # test_button = gr.Button('test', width=30, height=10)
            test_button = gr.Button('test')
        test_button.click(test_pass, txt_input_p, group_model_choice)
    with gr.Row():
        gr.HTML("""
        <div class="footer">
            <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77, and Omnibus's Maximum Multiplier!
            </p>
        </div>
        """)
js_code = """ | |
console.log('ghgh'); | |
""" | |
with gr.Blocks(theme="Nymbo/Nymbo_Theme", fill_width=True, css="div.float.svelte-1mwvhlq { position: absolute; top: var(--block-label-margin); left: var(--block-label-margin); background: none; border: none;}") as demo: | |
gr.Markdown("<script>" + js_code + "</script>") | |
make_me() | |
# https://www.gradio.app/guides/setting-up-a-demo-for-maximum-performance | |
#demo.queue(concurrency_count=999) # concurrency_count is deprecated in 4.x | |
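# demo.queue() with no arguments falls back to Gradio's default queue settings;
# see the performance guide linked above for tuning concurrency under load.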
demo.queue()
demo.launch()