# Burman AI — Gradio image-generation Space (Hugging Face Spaces app)
import asyncio
import os
from random import randint
from threading import RLock

import gradio as gr

from burman_models import models  # Custom Burman AI models
from externalmod import gr_Interface_load, randomize_seed

# Lock available for guarding shared state across Gradio worker threads.
lock = RLock()

# Optional Hugging Face API token; None when the env var is unset.
HF_TOKEN = os.getenv("HF_TOKEN", None)
# Load AI Models
def load_fn(models):
    """Populate the global ``models_load`` dict with one Gradio interface per model.

    Models that fail to load are given a no-op placeholder interface so the
    UI still renders and calls to them fail soft instead of raising KeyError.
    """
    global models_load
    models_load = {}
    for model in models:
        if model in models_load:  # dict membership; no need for .keys()
            continue
        try:
            m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
        except Exception as error:
            print(f"Error loading {model}:", error)
            # The placeholder fn must accept the same call shape as a real
            # model fn (prompt plus kwargs such as seed/token): the original
            # zero-argument lambda raised TypeError when infer() invoked it.
            m = gr.Interface(lambda prompt, **kwargs: None, ['text'], ['image'])
        models_load[model] = m

load_fn(models)
# Configurations
num_models = 9  # Number of model output panels shown in the UI
inference_timeout = 600  # Per-request inference timeout, in seconds
MAX_SEED = 999999999  # Upper bound of the seed slider
starting_seed = randint(100000000, MAX_SEED)  # Initial slider value, randomized each launch
def update_imgbox(choices):
    """Return one gr.Image update per output slot for the selected models.

    Always yields exactly ``num_models`` components: Gradio requires the
    callback's return count to match the number of bound outputs, so the
    selection is padded with 'NA' placeholders (rendered hidden) when the
    user picks fewer than ``num_models`` models.
    """
    selected = list(choices[:num_models])
    selected += ['NA'] * (num_models - len(selected))  # pad to the fixed slot count
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in selected]
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    """Run one model inference off-thread, bounded by *timeout* seconds.

    Looks up the model's callable in the global ``models_load`` dict and
    invokes it with the prompt, seed, and HF token. Returns the model's
    output, or None on timeout or any model-side error.
    """
    kwargs = {"seed": seed}
    # Run the blocking model call in a worker thread so the event loop stays free.
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)  # yield once so the task gets scheduled before we wait
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except Exception as e:
        # Broad catch: covers asyncio.TimeoutError as well as model failures.
        print(f"Error: {e}")
        if not task.done():
            # Cancel the wrapper task on timeout. NOTE(review): this does not
            # interrupt the already-running worker thread — confirm acceptable.
            task.cancel()
        result = None
    return result
def generate_image(model_str, prompt, seed):
    """Synchronous wrapper around infer() for use as a Gradio click callback.

    Returns None for the 'NA' placeholder model, the generated image on
    success, or the fallback path "error.png" when inference returned None.
    """
    if model_str == 'NA':
        return None
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        result = loop.run_until_complete(infer(model_str, prompt, seed))
    finally:
        # Close the loop even when infer() raises — the original closed it
        # only on the success path, leaking an event loop per failed call.
        loop.close()
    return result or "error.png"
# Gradio UI
demo = gr.Blocks(theme='dark')  # Dark mode
with demo:
    gr.Markdown("# 🖍️ Burman AI - AI-Powered Image Generator 🖍️")
    with gr.Tab("Generate Images"):
        with gr.Row():
            prompt_input = gr.Textbox(label='Enter your prompt:', lines=3, scale=3)
            gen_button = gr.Button('Generate Image 🖌️', scale=1)
        with gr.Row():
            seed_slider = gr.Slider(label="Seed (Optional)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
            seed_button = gr.Button("Random Seed 🎲", scale=1)
        seed_button.click(randomize_seed, None, [seed_slider])
        with gr.Row():
            output_images = [gr.Image(label=m) for m in models[:num_models]]
        for model, img_output in zip(models[:num_models], output_images):
            # Gradio event inputs must be components, not plain strings; the
            # original passed the model name str in the inputs list, which
            # fails at runtime. Bind it early via a lambda default instead
            # (a bare closure over `model` would late-bind to the last model).
            gen_button.click(
                lambda prompt, seed, m=model: generate_image(m, prompt, seed),
                [prompt_input, seed_slider],
                img_output,
            )
    with gr.Tab("Model Selection"):
        model_choice = gr.CheckboxGroup(models, label="Select models to use", value=models[:num_models])
        model_choice.change(update_imgbox, model_choice, output_images)
    gr.Markdown("### Burman AI | Powered by Open-Source AI")

demo.queue()
demo.launch(share=True)