Yntec committed
Commit f2f301e · verified
Parent(s): 29b93e2

Update app.py

Files changed (1): app.py (+157 -157)

app.py CHANGED
@@ -1,158 +1,158 @@
- import gradio as gr
- import os
- from all_models import models
- from externalmod import gr_Interface_load, save_image, randomize_seed
- from prompt_extend import extend_prompt
- import asyncio
- from threading import RLock
- lock = RLock()
- HF_TOKEN = os.getenv("HF_TOKEN", None)
-
- inference_timeout = 600
- MAX_SEED = 2**32-1
- current_model = models[0]
- text_gen1 = extend_prompt
-
- kwargs = {}
- if HF_TOKEN: kwargs["hf_token"] = HF_TOKEN
- models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, **kwargs) for m in models]
-
- def text_it1(inputs, text_gen1=text_gen1):
-     go_t1 = text_gen1(inputs)
-     return(go_t1)
-
- def set_model(current_model):
-     current_model = models[current_model]
-     return gr.update(label=(f"{current_model}"))
-
- def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed):
-     output1 = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
-     return (output1)
-
- # https://huggingface.co/docs/api-inference/detailed_parameters
- # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
- async def infer(model_index, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
-     kwargs = {}
-     if height > 0: kwargs["height"] = height
-     if width > 0: kwargs["width"] = width
-     if steps > 0: kwargs["num_inference_steps"] = steps
-     if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
-     if seed == -1: kwargs["seed"] = randomize_seed()
-     else: kwargs["seed"] = seed
-     task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
-                                                  prompt=prompt, negative_prompt=nprompt, **kwargs))
-     await asyncio.sleep(0)
-     try:
-         result = await asyncio.wait_for(task, timeout=timeout)
-     except asyncio.TimeoutError as e:
-         print(e)
-         print(f"Task timed out: {models[model_index]}")
-         if not task.done(): task.cancel()
-         result = None
-         raise Exception(f"Task timed out: {models[model_index]}") from e
-     except Exception as e:
-         print(e)
-         if not task.done(): task.cancel()
-         result = None
-         raise Exception() from e
-     if task.done() and result is not None and not isinstance(result, tuple):
-         with lock:
-             png_path = "image.png"
-             image = save_image(result, png_path, models[model_index], prompt, nprompt, height, width, steps, cfg, seed)
-         return image
-     return None
-
- def gen_fn(model_index, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
-     try:
-         loop = asyncio.new_event_loop()
-         result = loop.run_until_complete(infer(model_index, prompt, nprompt,
-                                                height, width, steps, cfg, seed, inference_timeout))
-     except (Exception, asyncio.CancelledError) as e:
-         print(e)
-         print(f"Task aborted: {models[model_index]}")
-         with lock:
-             image = "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
-             result = image
-         raise gr.Error(f"Task aborted: {models[model_index]}, Error: {e}")
-     finally:
-         loop.close()
-     return result
-
- css="""
- .gradio-container {background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
- color: #ffaa66 !important; font-family: 'IBM Plex Sans', sans-serif !important;}
- h1 {font-size: 6em; color: #ffc99f; margin-top: 30px; margin-bottom: 30px;
- text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;}
- h3 {color: #ffc99f !important;}
- h4 {display: inline-block; color: #ffffff !important;}
- .wrapper img {font-size: 98% !important; white-space: nowrap !important; text-align: center !important;
- display: inline-block !important; color: #ffffff !important;}
- .wrapper {color: #ffffff !important;}
- .gr-box {background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
- border-top-color: #000000 !important; border-right-color: #ffffff !important;
- border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
- """
-
- with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
-     gr.HTML(f"""
-         <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
-         <div class="center"><h1>Blitz Diffusion</h1></div>
-         <p style="margin-bottom: 1px; color: #ffaa66;">
-         <h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
-         <br><div class="wrapper">2025.3.7 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 13 new models since last update!</div>
-         <p style="margin-bottom: 1px; font-size: 98%">
-         <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
-         <p style="margin-bottom: 1px; color: #ffffff;">
-         <br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
-         </p></p></div>
-         """, elem_classes="gr-box")
-     with gr.Row():
-         with gr.Column(scale=100):
-             # Model selection dropdown
-             model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index",
-                                       value=current_model, interactive=True, elem_classes=["gr-box", "gr-input"])
-     with gr.Row():
-         with gr.Column(scale=100):
-             with gr.Group():
-                 magic1 = gr.Textbox(label="Your Prompt", lines=4, elem_classes=["gr-box", "gr-input"]) #Positive
-                 with gr.Accordion("Advanced", open=False, visible=True):
-                     neg_input = gr.Textbox(label='Negative prompt', lines=1, elem_classes=["gr-box", "gr-input"])
-                     with gr.Row():
-                         width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
-                         height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
-                     with gr.Row():
-                         steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=33, step=1, value=0, elem_classes=["gr-box", "gr-input"])
-                         cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=-1, elem_classes=["gr-box", "gr-input"])
-                         seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes=["gr-box", "gr-input"])
-                     seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
-                 run = gr.Button("Generate Image", variant="primary", elem_classes="gr-button")
-
-     with gr.Row():
-         with gr.Column():
-             output1 = gr.Image(label=(f"{current_model}"), show_download_button=True,
-                                interactive=False, show_share_button=False, format=".png", elem_classes="gr-box")
-
-     with gr.Row():
-         with gr.Column(scale=50):
-             input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2, elem_classes=["gr-box", "gr-input"])
-             see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
-             use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
-             def short_prompt(inputs):
-                 return (inputs)
-
-     model_name1.change(set_model, inputs=model_name1, outputs=[output1])
-     gr.on(
-         triggers=[run.click, magic1.submit],
-         fn=send_it1,
-         inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
-         outputs=[output1],
-         concurrency_limit=None,
-         queue=False,
-     )
-     use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
-     see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
-     seed_rand.click(randomize_seed, None, [seed], queue=False)
-
- myface.queue(default_concurrency_limit=200, max_size=200)
- myface.launch(show_api=False, max_threads=400)
+ import gradio as gr
+ import os
+ from all_models import models
+ from externalmod import gr_Interface_load, save_image, randomize_seed
+ from prompt_extend import extend_prompt
+ import asyncio
+ from threading import RLock
+ lock = RLock()
+ HF_TOKEN = os.getenv("HF_TOKEN", None)
+
+ inference_timeout = 600
+ MAX_SEED = 2**32-1
+ current_model = models[0]
+ text_gen1 = extend_prompt
+
+ kwargs = {}
+ if HF_TOKEN: kwargs["hf_token"] = HF_TOKEN
+ models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, **kwargs) for m in models]
+
+ def text_it1(inputs, text_gen1=text_gen1):
+     go_t1 = text_gen1(inputs)
+     return(go_t1)
+
+ def set_model(current_model):
+     current_model = models[current_model]
+     return gr.update(label=(f"{current_model}"))
+
+ def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed):
+     output1 = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
+     return (output1)
+
+ # https://huggingface.co/docs/api-inference/detailed_parameters
+ # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
+ async def infer(model_index, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
+     kwargs = {}
+     if height > 0: kwargs["height"] = height
+     if width > 0: kwargs["width"] = width
+     if steps > 0: kwargs["num_inference_steps"] = steps
+     if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
+     if seed == -1: kwargs["seed"] = randomize_seed()
+     else: kwargs["seed"] = seed
+     task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
+                                                  prompt=prompt, negative_prompt=nprompt, **kwargs))
+     await asyncio.sleep(0)
+     try:
+         result = await asyncio.wait_for(task, timeout=timeout)
+     except asyncio.TimeoutError as e:
+         print(e)
+         print(f"Task timed out: {models[model_index]}")
+         if not task.done(): task.cancel()
+         result = None
+         raise Exception(f"Task timed out: {models[model_index]}") from e
+     except Exception as e:
+         print(e)
+         if not task.done(): task.cancel()
+         result = None
+         raise Exception() from e
+     if task.done() and result is not None and not isinstance(result, tuple):
+         with lock:
+             png_path = "image.png"
+             image = save_image(result, png_path, models[model_index], prompt, nprompt, height, width, steps, cfg, seed)
+         return image
+     return None
+
+ def gen_fn(model_index, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
+     try:
+         loop = asyncio.new_event_loop()
+         result = loop.run_until_complete(infer(model_index, prompt, nprompt,
+                                                height, width, steps, cfg, seed, inference_timeout))
+     except (Exception, asyncio.CancelledError) as e:
+         print(e)
+         print(f"Task aborted: {models[model_index]}")
+         with lock:
+             image = "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
+             result = image
+         raise gr.Error(f"Task aborted: {models[model_index]}, Error: {e}")
+     finally:
+         loop.close()
+     return result
+
+ css="""
+ .gradio-container {background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
+ color: #ffaa66 !important; font-family: 'IBM Plex Sans', sans-serif !important;}
+ h1 {font-size: 6em; color: #ffc99f; margin-top: 30px; margin-bottom: 30px;
+ text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;}
+ h3 {color: #ffc99f !important;}
+ h4 {display: inline-block; color: #ffffff !important;}
+ .wrapper img {font-size: 98% !important; white-space: nowrap !important; text-align: center !important;
+ display: inline-block !important; color: #ffffff !important;}
+ .wrapper {color: #ffffff !important;}
+ .gr-box {background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
+ border-top-color: #000000 !important; border-right-color: #ffffff !important;
+ border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
+ """
+
+ with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
+     gr.HTML(f"""
+         <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
+         <div class="center"><h1>Blitz Diffusion</h1></div>
+         <p style="margin-bottom: 1px; color: #ffaa66;">
+         <h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
+         <br><div class="wrapper">2025.4.4 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 15 new models since last update!</div>
+         <p style="margin-bottom: 1px; font-size: 98%">
+         <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
+         <p style="margin-bottom: 1px; color: #ffffff;">
+         <br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
+         </p></p></div>
+         """, elem_classes="gr-box")
+     with gr.Row():
+         with gr.Column(scale=100):
+             # Model selection dropdown
+             model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index",
+                                       value=current_model, interactive=True, elem_classes=["gr-box", "gr-input"])
+     with gr.Row():
+         with gr.Column(scale=100):
+             with gr.Group():
+                 magic1 = gr.Textbox(label="Your Prompt", lines=4, elem_classes=["gr-box", "gr-input"]) #Positive
+                 with gr.Accordion("Advanced", open=False, visible=True):
+                     neg_input = gr.Textbox(label='Negative prompt', lines=1, elem_classes=["gr-box", "gr-input"])
+                     with gr.Row():
+                         width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
+                         height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
+                     with gr.Row():
+                         steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=33, step=1, value=0, elem_classes=["gr-box", "gr-input"])
+                         cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=-1, elem_classes=["gr-box", "gr-input"])
+                         seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes=["gr-box", "gr-input"])
+                     seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
+                 run = gr.Button("Generate Image", variant="primary", elem_classes="gr-button")
+
+     with gr.Row():
+         with gr.Column():
+             output1 = gr.Image(label=(f"{current_model}"), show_download_button=True,
+                                interactive=False, show_share_button=False, format=".png", elem_classes="gr-box")
+
+     with gr.Row():
+         with gr.Column(scale=50):
+             input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2, elem_classes=["gr-box", "gr-input"])
+             see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
+             use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
+             def short_prompt(inputs):
+                 return (inputs)
+
+     model_name1.change(set_model, inputs=model_name1, outputs=[output1])
+     gr.on(
+         triggers=[run.click, magic1.submit],
+         fn=send_it1,
+         inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
+         outputs=[output1],
+         concurrency_limit=None,
+         queue=False,
+     )
+     use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
+     see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
+     seed_rand.click(randomize_seed, None, [seed], queue=False)
+
+ myface.queue(default_concurrency_limit=200, max_size=200)
+ myface.launch(show_api=False, max_threads=400)
  # https://github.com/gradio-app/gradio/issues/6339
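
Aside from the date and model count in the header HTML, the core of app.py is unchanged by this commit: infer() runs the blocking model call in a worker thread, enforces a timeout with asyncio.wait_for, and gen_fn() drives it from a synchronous Gradio callback through a fresh event loop (the workaround for the gradio issue linked above). Below is a minimal, self-contained sketch of that pattern for reference; slow_model_call, the example prompt, and the 2-second timeout are illustrative placeholders, not code from this Space.

import asyncio
import time

def slow_model_call(prompt: str) -> str:
    # Placeholder for a blocking inference call (e.g. models2[i].fn in app.py).
    time.sleep(1)
    return f"image for: {prompt}"

async def infer_with_timeout(prompt: str, timeout: float = 2.0) -> str:
    # Run the blocking call in a worker thread so the event loop stays responsive.
    task = asyncio.create_task(asyncio.to_thread(slow_model_call, prompt))
    try:
        # Give up (and cancel the task) if the call exceeds the timeout.
        return await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        if not task.done():
            task.cancel()
        raise

def generate(prompt: str) -> str:
    # Synchronous wrapper usable as a Gradio callback: each call gets its own
    # event loop and closes it afterwards, mirroring gen_fn() above.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(infer_with_timeout(prompt))
    finally:
        loop.close()

if __name__ == "__main__":
    print(generate("a toy world"))

Running the sketch prints the placeholder result after about one second; raising the sleep above the timeout instead raises asyncio.TimeoutError, which is the case gen_fn() turns into a gr.Error for the UI.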