burman-ai committed on
Commit
0344970
·
verified ·
1 Parent(s): 4580970

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +120 -69
app.py CHANGED
@@ -1,93 +1,144 @@
1
  import gradio as gr
2
- import asyncio
3
- import os
4
  from random import randint
5
- from threading import RLock
6
  from all_models import models
 
7
  from externalmod import gr_Interface_load, randomize_seed
8
 
9
- # Lock to prevent concurrent access issues
 
 
10
  lock = RLock()
11
- HF_TOKEN = os.getenv("HF_TOKEN", None) # Hugging Face token for private models
12
 
13
- # Load models
14
- def load_models(models):
15
  global models_load
16
  models_load = {}
17
 
18
  for model in models:
19
- if model not in models_load:
20
  try:
21
- models_load[model] = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
22
  except Exception as error:
23
- print(f"Error loading model {model}: {error}")
24
- models_load[model] = None # Handle failed model load
25
-
26
- load_models(models)
27
-
28
- # Constants
29
- NUM_MODELS = 9
30
- DEFAULT_MODELS = models[:NUM_MODELS]
31
- INFERENCE_TIMEOUT = 600
32
- MAX_SEED = 666666666
33
- starting_seed = randint(666666000, MAX_SEED)
34
-
35
- # Async inference function
36
- async def infer(model_str, prompt, seed=1, timeout=INFERENCE_TIMEOUT):
37
- if model_str not in models_load or models_load[model_str] is None:
38
- return "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
39
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  try:
41
- result = await asyncio.wait_for(
42
- asyncio.to_thread(models_load[model_str].fn, prompt=prompt, seed=seed, token=HF_TOKEN),
43
- timeout=timeout
44
- )
45
- return result if result else "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
46
- except asyncio.TimeoutError:
47
- print(f"Timeout error: {model_str}")
48
- except Exception as e:
49
- print(f"Error in inference: {e}")
50
- return "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
51
-
52
- # Synchronous wrapper
53
- def generate_image(model_str, prompt, seed=1):
 
 
 
 
 
54
  if model_str == 'NA':
55
  return None
56
-
57
- loop = asyncio.new_event_loop()
58
- asyncio.set_event_loop(loop)
59
  try:
60
- return loop.run_until_complete(infer(model_str, prompt, seed))
61
- except Exception as e:
62
- print(f"Error generating image: {e}")
63
- return "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
 
 
 
 
 
64
  finally:
65
  loop.close()
 
66
 
67
- # Gradio UI
68
  with gr.Blocks(theme='Yntec/HaleyCH_Theme_craiyon') as demo:
69
- gr.HTML("""<center><img src='https://huggingface.co/spaces/Yntec/open-craiyon/resolve/main/open_craiyon.png' height='79'></center>""")
70
-
71
- with gr.Tab('🖍️ AI Image Generator 🖍️'):
72
- txt_input = gr.Textbox(label='Enter your prompt:', lines=4)
73
- gen_button = gr.Button('Generate Image 🖍️')
74
-
75
- seed_slider = gr.Slider(label="Seed (for reproducibility)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed)
76
- random_seed_btn = gr.Button("Randomize Seed 🎲")
77
-
78
- random_seed_btn.click(randomize_seed, None, [seed_slider])
79
-
80
- output_images = [gr.Image(label=m) for m in DEFAULT_MODELS]
81
- model_inputs = [gr.Textbox(m, visible=False) for m in DEFAULT_MODELS]
82
-
83
- for model, img_output in zip(model_inputs, output_images):
84
- gen_button.click(generate_image, inputs=[model, txt_input, seed_slider], outputs=[img_output])
85
-
86
- gr.Accordion("Model Selection", open=False):
87
- model_choice = gr.CheckboxGroup(models, label="Select Models", value=DEFAULT_MODELS)
88
- model_choice.change(lambda selected: [gr.Image(visible=m in selected) for m in models], inputs=[model_choice], outputs=[output_images])
89
-
90
- gr.HTML("""<p>Check out more models at <a href='https://huggingface.co/spaces/Yntec/ToyWorld'>Toy World</a>!</p>""")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
  demo.queue(default_concurrency_limit=200, max_size=200)
93
  demo.launch(show_api=False, max_threads=400)
 
1
  import gradio as gr
 
 
2
  from random import randint
 
3
  from all_models import models
4
+
5
  from externalmod import gr_Interface_load, randomize_seed
6
 
7
+ import asyncio
8
+ import os
9
+ from threading import RLock
10
# Guards writes to the shared image.png scratch file used by infer().
lock = RLock()
# Single env lookup; empty string normalizes to None, matching the original
# double-get expression. If private or gated models aren't used, ENV setting is unnecessary.
HF_TOKEN = os.getenv("HF_TOKEN") or None
12
 
13
+
14
+ def load_fn(models):
15
  global models_load
16
  models_load = {}
17
 
18
  for model in models:
19
+ if model not in models_load.keys():
20
  try:
21
+ m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
22
  except Exception as error:
23
+ print(error)
24
+ m = gr.Interface(lambda: None, ['text'], ['image'])
25
+ models_load.update({model: m})
26
+
27
+
28
# Build the model registry once at import time.
load_fn(models)


# Number of image slots shown in the UI.
num_models = 9

# The first nine models from all_models are displayed by default.
default_models = models[:num_models]
# Per-request inference timeout in seconds (passed to asyncio.wait_for).
inference_timeout = 600
MAX_SEED=666666666
# Random initial seed within the allowed band so each session starts differently.
starting_seed = randint(666666000, 666666666)
37
+
38
def extend_choices(choices):
    """Pad the selected-model list with 'NA' placeholders up to num_models entries."""
    trimmed = choices[:num_models]
    padding = ['NA'] * (num_models - len(trimmed))
    return trimmed + padding
40
+
41
+
42
def update_imgbox(choices):
    """Return fresh, cleared gr.Image components: visible for selected models, hidden for 'NA' slots."""
    slots = extend_choices(choices[:num_models])
    return [gr.Image(None, label=name, visible=(name != 'NA')) for name in slots]
45
+
46
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    """Run one model's text-to-image fn in a worker thread and save the result.

    Returns the absolute path of the saved PNG on success, or None on
    timeout/failure.
    """
    from pathlib import Path
    kwargs = {"seed": seed}
    # Placeholder kept so the prompt string sent to the model is unchanged
    # (f'{prompt} {noise}' keeps its trailing space).
    noise = ""
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                                                 prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)  # yield so the task actually starts before we wait
    result = None
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    except Exception as e:
        # Real inference failure: log the actual error instead of mislabeling
        # every exception as a timeout (the original combined handler did that).
        print(e)
        print(f"Task failed: {model_str}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:  # serialize writes to the shared image.png scratch file
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
68
+
69
+
70
+
71
+
72
def gen_fnseed(model_str, prompt, seed=1):
    """Synchronous wrapper around infer() for Gradio event callbacks.

    Returns None for the 'NA' placeholder slot, the saved image path on
    success, or a fallback error-image URL when inference fails.
    """
    if model_str == 'NA':
        return None
    # Create the loop *before* the try block: if creation happened inside and
    # raised, `finally: loop.close()` would hit a NameError that masks the
    # original exception.
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        with lock:
            # Show a shared placeholder image rather than an empty slot.
            result = "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
    finally:
        loop.close()
    return result
88
 
 
89
with gr.Blocks(theme='Yntec/HaleyCH_Theme_craiyon') as demo:
    # Header banner.
    gr.HTML(
        """
        <div>
        <p> <center><img src="https://huggingface.co/spaces/Yntec/open-craiyon/resolve/main/open_craiyon.png" style="height:79px; width:367px; margin-top: -22px; margin-bottom: -44px;" span title="Best free ai art image generator open craiyon"></center>
        </p>
        """
    )
    with gr.Tab('🖍️ AI models drawing images from any prompt! 🖍️'):
        # Prompt entry plus the main generate button.
        with gr.Row():
            txt_input = gr.Textbox(label='Your prompt:', lines=4, scale=3)
            gen_button = gr.Button('Draw it! 🖍️', scale=1)
        # Seed slider and a randomizer; the randomizer skips the request queue.
        with gr.Row():
            seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 666666666)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
            seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary", scale=1)
            seed_rand.click(randomize_seed, None, [seed], queue=False)
        #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)

        gen_button.click(lambda s: gr.update(interactive = True), None)
        gr.HTML(
            """
            <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
            <div>
            <body>
            <div class="center"><p style="margin-bottom: 10px; color: #5b6272;">Scroll down to see more images and select models.</p>
            </div>
            </body>
            </div>
            </div>
            """
        )
        # One image slot per default model, plus a hidden textbox per slot that
        # carries the model name into gen_fnseed's model_str input.
        with gr.Row():
            output = [gr.Image(min_width=480) for m in default_models]
            #output = [gr.Image(label = m, min_width=480) for m in default_models]
            current_models = [gr.Textbox(m, visible = False) for m in default_models]

        # Wire every slot to the same prompt/seed; both clicking the button and
        # submitting the textbox trigger a generation per slot.
        for m, o in zip(current_models, output):
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fnseed,
                              inputs=[m, txt_input, seed], outputs=[o], concurrency_limit=None, queue=False)
            #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
        # The checkbox group drives slot visibility (update_imgbox) and which
        # model name each hidden textbox carries (extend_choices).
        with gr.Accordion('Model selection'):
            model_choice = gr.CheckboxGroup(models, label = 'Untick the models you will not be using', value=default_models, interactive=True)
            #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
            model_choice.change(update_imgbox, model_choice, output)
            model_choice.change(extend_choices, model_choice, current_models)
    # Footer link.
    # NOTE(review): reconstructed from a diff view — confirm this Row sits at
    # Blocks level (not inside the Tab) against the deployed file.
    with gr.Row():
        gr.HTML(
            """
            <div class="footer">
            <p> For more than a hundred times more models (that's not a typo) check out <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!</a>
            </p>
            """
        )
142
 
143
  demo.queue(default_concurrency_limit=200, max_size=200)
144
  demo.launch(show_api=False, max_threads=400)