import gradio as gr
from random import randint
from burman_models import models # Custom Burman AI models
from externalmod import gr_Interface_load, randomize_seed
import asyncio
import os
from threading import RLock
# Lock for thread safety
lock = RLock()
HF_TOKEN = os.getenv("HF_TOKEN", None) # Hugging Face token if needed
# Load AI Models
def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(f"Error loading {model}:", error)
                # Fall back to a no-op placeholder interface so the UI still renders
                m = gr.Interface(lambda *_: None, ['text'], ['image'])
            models_load[model] = m
load_fn(models)
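# After load_fn(), models_load maps each model id to its remotely loaded
# interface, or to the placeholder interface when loading failed.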
# Configurations
num_models = 9 # Number of models to show
inference_timeout = 600  # Per-model inference timeout, in seconds
MAX_SEED = 999999999 # Increased seed range for more randomness
starting_seed = randint(100000000, MAX_SEED)
def update_imgbox(choices):
    # Pad/truncate the selection to a fixed length so it matches the image components
    choices = (list(choices) + ['NA'] * num_models)[:num_models]
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices]
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    kwargs = {"seed": seed}
    # Run the blocking model call in a worker thread so it can be awaited with a timeout
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except Exception as e:
        print(f"Error: {e}")
        if not task.done():
            task.cancel()
        result = None
    return result
def generate_image(model_str, prompt, seed):
    if model_str == 'NA':
        return None
    # Drive the async inference from Gradio's synchronous callback
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        result = loop.run_until_complete(infer(model_str, prompt, seed))
    finally:
        loop.close()
    return result or "error.png"
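# Example call for testing (hypothetical model id; assumes it appears in
# burman_models.models and was loaded by load_fn above):
#   generate_image("runwayml/stable-diffusion-v1-5", "a watercolor fox", 42)
# Returns the model's image output on success, or "error.png" if inference
# failed or timed out.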
# Gradio UI
demo = gr.Blocks(theme='dark')  # Dark mode
with demo:
    gr.Markdown("# 🖍️ Burman AI - AI-Powered Image Generator 🖍️")
    with gr.Tab("Generate Images"):
        with gr.Row():
            prompt_input = gr.Textbox(label='Enter your prompt:', lines=3, scale=3)
            gen_button = gr.Button('Generate Image 🖌️', scale=1)
        with gr.Row():
            seed_slider = gr.Slider(label="Seed (Optional)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
            seed_button = gr.Button("Random Seed 🎲", scale=1)
            seed_button.click(randomize_seed, None, [seed_slider])
        with gr.Row():
            output_images = [gr.Image(label=m) for m in models[:num_models]]
        for model, img_output in zip(models[:num_models], output_images):
            # Gradio inputs must be components, so the model id is bound via a
            # lambda default instead of being passed in the inputs list.
            gen_button.click(lambda p, s, m=model: generate_image(m, p, s), [prompt_input, seed_slider], img_output)
    with gr.Tab("Model Selection"):
        model_choice = gr.CheckboxGroup(models, label="Select models to use", value=models[:num_models])
        model_choice.change(update_imgbox, model_choice, output_images)
    gr.Markdown("### Burman AI | Powered by Open-Source AI")
demo.queue()
demo.launch(share=True)