burman-ai committed
Commit 623e4fa · verified · 1 parent: e2f4864

Update app.py

Files changed (1): app.py (+49, -106)
app.py CHANGED

Old version (lines removed by this commit are prefixed with "-"):

@@ -1,16 +1,16 @@
  import gradio as gr
  from random import randint
- from all_models import models
-
  from externalmod import gr_Interface_load, randomize_seed
-
  import asyncio
  import os
  from threading import RLock
- lock = RLock()
- HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.


  def load_fn(models):
      global models_load
      models_load = {}
@@ -20,126 +20,69 @@ def load_fn(models):
              try:
                  m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
              except Exception as error:
-                 print(error)
                  m = gr.Interface(lambda: None, ['text'], ['image'])
-             models_load.update({model: m})
-

  load_fn(models)

-
- num_models = 9
-
- default_models = models[:num_models]
  inference_timeout = 600
- MAX_SEED=666666666
- starting_seed = randint(666666000, 666666666)
-
- def extend_choices(choices):
-     return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
-

  def update_imgbox(choices):
-     choices_plus = extend_choices(choices[:num_models])
-     return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]

  async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
-     from pathlib import Path
-     kwargs = {}
-     noise = ""
-     kwargs["seed"] = seed
-     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
-                                prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
      await asyncio.sleep(0)
      try:
          result = await asyncio.wait_for(task, timeout=timeout)
-     except (Exception, asyncio.TimeoutError) as e:
-         print(e)
-         print(f"Task timed out: {model_str}")
-         if not task.done(): task.cancel()
          result = None
-     if task.done() and result is not None:
-         with lock:
-             png_path = "image.png"
-             result.save(png_path)
-             image = str(Path(png_path).resolve())
-         return image
-     return None
-
-
-

- def gen_fnseed(model_str, prompt, seed=1):
      if model_str == 'NA':
          return None
-     try:
-         loop = asyncio.new_event_loop()
-         result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
-     except (Exception, asyncio.CancelledError) as e:
-         print(e)
-         print(f"Task aborted: {model_str}")
-         result = None
-         with lock:
-             image = "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
-             result = image
-     finally:
-         loop.close()
-     return result

- with gr.Blocks(theme='Yntec/HaleyCH_Theme_craiyon') as demo:
-     gr.HTML(
-         """
-         <div>
-         <p> <center><img src="https://huggingface.co/spaces/Yntec/open-craiyon/resolve/main/open_craiyon.png" style="height:79px; width:367px; margin-top: -22px; margin-bottom: -44px;" span title="Best free ai art image generator open craiyon"></center>
-         </p>
-         """
-     )
-     with gr.Tab('🖍️ AI models drawing images from any prompt! 🖍️'):
-         with gr.Row():
-             txt_input = gr.Textbox(label='Your prompt:', lines=4, scale=3)
-             gen_button = gr.Button('Draw it! 🖍️', scale=1)
          with gr.Row():
-             seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 666666666)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
-             seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary", scale=1)
-             seed_rand.click(randomize_seed, None, [seed], queue=False)
-         #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
-
-         gen_button.click(lambda s: gr.update(interactive = True), None)
-         gr.HTML(
-             """
-             <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
-             <div>
-             <body>
-             <div class="center"><p style="margin-bottom: 10px; color: #5b6272;">Scroll down to see more images and select models.</p>
-             </div>
-             </body>
-             </div>
-             </div>
-             """
-         )
          with gr.Row():
-             output = [gr.Image(min_width=480) for m in default_models]
-             #output = [gr.Image(label = m, min_width=480) for m in default_models]
-             current_models = [gr.Textbox(m, visible = False) for m in default_models]
-
-             for m, o in zip(current_models, output):
-                 gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fnseed,
-                                   inputs=[m, txt_input, seed], outputs=[o], concurrency_limit=None, queue=False)
-             #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
-         with gr.Accordion('Model selection'):
-             model_choice = gr.CheckboxGroup(models, label = 'Untick the models you will not be using', value=default_models, interactive=True)
-             #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
-             model_choice.change(update_imgbox, model_choice, output)
-             model_choice.change(extend_choices, model_choice, current_models)
          with gr.Row():
-             gr.HTML(
-                 """
-                 <div class="footer">
-                 <p> For more than a hundred times more models (that's not a typo) check out <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!</a>
-                 </p>
-                 """
-             )

- demo.queue(default_concurrency_limit=200, max_size=200)
- demo.launch(show_api=False, max_threads=400)
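
The removed infer runs the blocking model call in a worker thread via asyncio.to_thread and guards it with asyncio.wait_for, so a hung backend is abandoned after inference_timeout seconds; the new version below keeps the same pattern. A minimal, self-contained sketch of that timeout pattern, assuming only the standard library (slow_model_call is an illustrative stand-in, not a function from this repository):

import asyncio
import time

def slow_model_call(prompt: str) -> str:
    # Illustrative stand-in for a blocking call such as models_load[model_str].fn(...).
    time.sleep(2)
    return f"image for: {prompt}"

async def infer_with_timeout(prompt: str, timeout: float = 5.0):
    # Run the blocking call in a worker thread so the event loop stays responsive,
    # and stop waiting after `timeout` seconds.
    task = asyncio.create_task(asyncio.to_thread(slow_model_call, prompt))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        # wait_for cancels the task on timeout; the worker thread itself still
        # finishes in the background.
        print("inference timed out")
        return None

if __name__ == "__main__":
    print(asyncio.run(infer_with_timeout("a red fox")))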
New version (lines added by this commit are prefixed with "+"):

@@ -1,16 +1,16 @@
  import gradio as gr
  from random import randint
+ from burman_models import models # Custom Burman AI models
  from externalmod import gr_Interface_load, randomize_seed
  import asyncio
  import os
  from threading import RLock

+ # Lock for thread safety
+ lock = RLock()
+ HF_TOKEN = os.getenv("HF_TOKEN", None) # Hugging Face token if needed

+ # Load AI Models
  def load_fn(models):
      global models_load
      models_load = {}
@@ -20,126 +20,69 @@ def load_fn(models):
              try:
                  m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
              except Exception as error:
+                 print(f"Error loading {model}:", error)
                  m = gr.Interface(lambda: None, ['text'], ['image'])
+             models_load[model] = m

  load_fn(models)

+ # Configurations
+ num_models = 9 # Number of models to show
  inference_timeout = 600
+ MAX_SEED = 999999999 # Increased seed range for more randomness
+ starting_seed = randint(100000000, MAX_SEED)

  def update_imgbox(choices):
+     return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices[:num_models]]

  async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
+     kwargs = {"seed": seed}
+     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, **kwargs, token=HF_TOKEN))
      await asyncio.sleep(0)
      try:
          result = await asyncio.wait_for(task, timeout=timeout)
+     except Exception as e:
+         print(f"Error: {e}")
+         if not task.done():
+             task.cancel()
          result = None
+     return result

+ def generate_image(model_str, prompt, seed):
      if model_str == 'NA':
          return None
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
+     result = loop.run_until_complete(infer(model_str, prompt, seed))
+     loop.close()
+     return result or "error.png"

+ # Gradio UI
+ demo = gr.Blocks(theme='dark') # Dark mode
+
+ with demo:
+     gr.Markdown("# 🖍️ Burman AI - AI-Powered Image Generator 🖍️")
+
+     with gr.Tab("Generate Images"):
          with gr.Row():
+             prompt_input = gr.Textbox(label='Enter your prompt:', lines=3, scale=3)
+             gen_button = gr.Button('Generate Image 🖌️', scale=1)
          with gr.Row():
+             seed_slider = gr.Slider(label="Seed (Optional)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
+             seed_button = gr.Button("Random Seed 🎲", scale=1)
+             seed_button.click(randomize_seed, None, [seed_slider])
          with gr.Row():
+             output_images = [gr.Image(label=m) for m in models[:num_models]]
+             for model, img_output in zip(models[:num_models], output_images):
+                 gen_button.click(generate_image, [model, prompt_input, seed_slider], img_output)
+
+     with gr.Tab("Model Selection"):
+         model_choice = gr.CheckboxGroup(models, label="Select models to use", value=models[:num_models])
+         model_choice.change(update_imgbox, model_choice, output_images)
+
+     gr.Markdown("### Burman AI | Powered by Open-Source AI")
+
+ demo.queue()
+ demo.launch(share=True)
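
One thing to note in the new wiring: gen_button.click(generate_image, [model, prompt_input, seed_slider], img_output) puts a plain Python string (model) into the inputs list, whereas Gradio expects event inputs to be components. The removed version handled this by creating one hidden gr.Textbox per model. A minimal sketch of that approach, assuming a models list and a generate_image(model_str, prompt, seed) callback like the ones in this file (the bodies below are placeholders, not the Space's real code):

import gradio as gr

models = ["model-a", "model-b", "model-c"]  # illustrative model names

def generate_image(model_str, prompt, seed):
    # Placeholder: the real app would run inference here and return an image path.
    return None

with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label="Enter your prompt:")
    seed_slider = gr.Slider(0, 999999999, step=1, value=1, label="Seed")
    gen_button = gr.Button("Generate")
    with gr.Row():
        output_images = [gr.Image(label=m) for m in models]
        # One hidden Textbox per model turns each model name into a real input
        # component, so every click handler receives its own model string.
        model_boxes = [gr.Textbox(m, visible=False) for m in models]
    for box, img in zip(model_boxes, output_images):
        gen_button.click(generate_image, inputs=[box, prompt_input, seed_slider], outputs=img)

demo.launch()

Binding the model name with functools.partial(generate_image, m) as the click function would also work and avoids the hidden components.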