Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,15 +1,12 @@
|
|
1 |
import gradio as gr
|
2 |
from random import randint
|
3 |
from all_models import models
|
4 |
-
|
5 |
from externalmod import gr_Interface_load
|
6 |
|
7 |
import asyncio
|
8 |
import os
|
9 |
from threading import RLock
|
10 |
lock = RLock()
|
11 |
-
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
|
12 |
-
|
13 |
|
14 |
def load_fn(models):
|
15 |
global models_load
|
@@ -18,7 +15,7 @@ def load_fn(models):
|
|
18 |
for model in models:
|
19 |
if model not in models_load.keys():
|
20 |
try:
|
21 |
-
m = gr_Interface_load(f'models/{model}'
|
22 |
except Exception as error:
|
23 |
print(error)
|
24 |
m = gr.Interface(lambda: None, ['text'], ['image'])
|
@@ -26,44 +23,32 @@ def load_fn(models):
|
|
26 |
|
27 |
load_fn(models)
|
28 |
|
29 |
-
|
30 |
num_models = 1
|
31 |
default_models = models[:num_models]
|
32 |
inference_timeout = 600
|
33 |
|
34 |
-
MAX_SEED=3999999999
|
35 |
-
|
36 |
-
|
37 |
|
38 |
def extend_choices(choices):
|
39 |
return choices + (num_models - len(choices)) * ['NA']
|
40 |
|
41 |
-
|
42 |
def update_imgbox(choices):
|
43 |
choices_plus = extend_choices(choices)
|
44 |
-
return [gr.Image(None, label
|
45 |
|
46 |
def gen_fn(model_str, prompt):
|
47 |
if model_str == 'NA':
|
48 |
return None
|
49 |
-
noise = str('')
|
50 |
return models_load[model_str](f'{prompt} {noise}')
|
51 |
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
|
61 |
from pathlib import Path
|
62 |
kwargs = {}
|
63 |
noise = ""
|
64 |
kwargs["seed"] = seed
|
65 |
task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
|
66 |
-
prompt=f'{prompt} {noise}', **kwargs
|
67 |
await asyncio.sleep(0)
|
68 |
try:
|
69 |
result = await asyncio.wait_for(task, timeout=timeout)
|
@@ -100,8 +85,9 @@ def gen_fnseed(model_str, prompt, seed=1):
|
|
100 |
def gen_fnsix(model_str, prompt):
|
101 |
if model_str == 'NA':
|
102 |
return None
|
103 |
-
noisesix = str(randint(1941, 2023))
|
104 |
return models_load[model_str](f'{prompt} {noisesix}')
|
|
|
105 |
with gr.Blocks() as demo:
|
106 |
gr.HTML(
|
107 |
"""
|
@@ -118,24 +104,22 @@ with gr.Blocks() as demo:
|
|
118 |
"""
|
119 |
)
|
120 |
with gr.Tab('One Image'):
|
121 |
-
model_choice = gr.Dropdown(models, label
|
122 |
-
txt_input = gr.Textbox(label
|
123 |
|
124 |
max_imagesone = 1
|
125 |
-
num_imagesone = gr.Slider(1, max_imagesone, value
|
126 |
|
127 |
gen_button = gr.Button('Generate')
|
128 |
-
#stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
|
129 |
-
gen_button.click(lambda s: gr.update(interactive = True), None)
|
130 |
|
131 |
with gr.Row():
|
132 |
-
output = [gr.Image(label
|
133 |
|
134 |
for i, o in enumerate(output):
|
135 |
-
img_in = gr.Number(i, visible
|
136 |
-
num_imagesone.change(lambda i, n: gr.update(visible
|
137 |
gen_event = gen_button.click(lambda i, n, m, t: gen_fn(m, t) if (i < n) else None, [img_in, num_imagesone, model_choice, txt_input], o, concurrency_limit=None, queue=False)
|
138 |
-
|
139 |
with gr.Row():
|
140 |
gr.HTML(
|
141 |
"""
|
@@ -145,77 +129,44 @@ with gr.Blocks() as demo:
|
|
145 |
"""
|
146 |
)
|
147 |
with gr.Tab('Seed it!'):
|
148 |
-
model_choiceseed = gr.Dropdown(models, label
|
149 |
-
txt_inputseed = gr.Textbox(label
|
150 |
seed = gr.Slider(label="Use a seed to replicate the same image later", info="Max 3999999999", minimum=0, maximum=MAX_SEED, step=1, value=1)
|
151 |
|
152 |
max_imagesseed = 1
|
153 |
-
num_imagesseed = gr.Slider(1, max_imagesone, value
|
154 |
|
155 |
gen_buttonseed = gr.Button('Generate an image using the seed')
|
156 |
-
#stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
|
157 |
-
gen_button.click(lambda s: gr.update(interactive = True), None)
|
158 |
|
159 |
with gr.Row():
|
160 |
-
outputseed = [gr.Image(label
|
161 |
|
162 |
for i, o in enumerate(outputseed):
|
163 |
-
img_is = gr.Number(i, visible
|
164 |
-
num_imagesseed.change(lambda i, n: gr.update(visible
|
165 |
-
#gen_eventseed = gen_buttonseed.click(lambda i, n, m, t, n1: gen_fnseed(m, t, n1) if (i < n) else None, [img_is, num_imagesseed, model_choiceseed, txt_inputseed, useseed], o, concurrency_limit=None, queue=False)
|
166 |
-
|
167 |
gen_eventseed = gr.on(triggers=[gen_buttonseed.click, txt_inputseed.submit],
|
168 |
fn=lambda i, n, m, t, n1: gen_fnseed(m, t, n1) if (i < n) else None,
|
169 |
inputs=[img_is, num_imagesseed, model_choiceseed, txt_inputseed, seed], outputs=[o],
|
170 |
-
|
171 |
-
|
172 |
-
#stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
|
173 |
-
with gr.Row():
|
174 |
-
gr.HTML(
|
175 |
-
"""
|
176 |
-
<div class="footer">
|
177 |
-
<p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77, Omnibus's Maximum Multiplier, and <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!
|
178 |
-
</p>
|
179 |
-
"""
|
180 |
-
)
|
181 |
-
with gr.Tab('Up To Six'):
|
182 |
-
model_choice2 = gr.Dropdown(models, label = f'Choose a model from the {len(models)} available! Try clearing the box and typing on it to filter them!', value = models[0], filterable = True)
|
183 |
-
txt_input2 = gr.Textbox(label = 'Your prompt:')
|
184 |
-
|
185 |
-
max_images = 6
|
186 |
-
num_images = gr.Slider(1, max_images, value = max_images, step = 1, label = 'Number of images (if you want less than 6 decrease them slowly until they match the boxes below)')
|
187 |
-
|
188 |
-
gen_button2 = gr.Button('Generate up to 6 images in up to 3 minutes total')
|
189 |
-
#stop_button2 = gr.Button('Stop', variant = 'secondary', interactive = False)
|
190 |
-
gen_button2.click(lambda s: gr.update(interactive = True), None)
|
191 |
-
gr.HTML(
|
192 |
-
"""
|
193 |
-
<div style="text-align: center; max-width: 1200px; margin: 0 auto;">
|
194 |
-
<div>
|
195 |
-
<body>
|
196 |
-
<div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images (they generate in a random order).</p>
|
197 |
-
</div>
|
198 |
-
</body>
|
199 |
-
</div>
|
200 |
-
</div>
|
201 |
-
"""
|
202 |
-
)
|
203 |
-
with gr.Column():
|
204 |
-
output2 = [gr.Image(label = '') for _ in range(max_images)]
|
205 |
-
|
206 |
-
for i, o in enumerate(output2):
|
207 |
-
img_i = gr.Number(i, visible = False)
|
208 |
-
num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o, show_progress = False)
|
209 |
-
gen_event2 = gen_button2.click(lambda i, n, m, t: gen_fnsix(m, t) if (i < n) else None, [img_i, num_images, model_choice2, txt_input2], o, concurrency_limit=None, queue=False)
|
210 |
-
#stop_button2.click(lambda s: gr.update(interactive = False), None, stop_button2, cancels = [gen_event2])
|
211 |
with gr.Row():
|
212 |
gr.HTML(
|
213 |
"""
|
214 |
<div class="footer">
|
215 |
-
<p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen
|
216 |
-
|
217 |
-
|
218 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
219 |
|
220 |
-
|
221 |
-
demo.launch(show_api=False, max_threads=400)
|
|
|
1 |
import gradio as gr
|
2 |
from random import randint
|
3 |
from all_models import models
|
|
|
4 |
from externalmod import gr_Interface_load
|
5 |
|
6 |
import asyncio
|
7 |
import os
|
8 |
from threading import RLock
|
9 |
lock = RLock()
|
|
|
|
|
10 |
|
11 |
def load_fn(models):
|
12 |
global models_load
|
|
|
15 |
for model in models:
|
16 |
if model not in models_load.keys():
|
17 |
try:
|
18 |
+
m = gr_Interface_load(f'models/{model}')
|
19 |
except Exception as error:
|
20 |
print(error)
|
21 |
m = gr.Interface(lambda: None, ['text'], ['image'])
|
|
|
23 |
|
24 |
load_fn(models)
|
25 |
|
|
|
26 |
num_models = 1
|
27 |
default_models = models[:num_models]
|
28 |
inference_timeout = 600
|
29 |
|
30 |
+
MAX_SEED = 3999999999
|
|
|
|
|
31 |
|
32 |
def extend_choices(choices):
|
33 |
return choices + (num_models - len(choices)) * ['NA']
|
34 |
|
|
|
35 |
def update_imgbox(choices):
|
36 |
choices_plus = extend_choices(choices)
|
37 |
+
return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
|
38 |
|
39 |
def gen_fn(model_str, prompt):
|
40 |
if model_str == 'NA':
|
41 |
return None
|
42 |
+
noise = str('')
|
43 |
return models_load[model_str](f'{prompt} {noise}')
|
44 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
45 |
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
|
46 |
from pathlib import Path
|
47 |
kwargs = {}
|
48 |
noise = ""
|
49 |
kwargs["seed"] = seed
|
50 |
task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
|
51 |
+
prompt=f'{prompt} {noise}', **kwargs))
|
52 |
await asyncio.sleep(0)
|
53 |
try:
|
54 |
result = await asyncio.wait_for(task, timeout=timeout)
|
|
|
85 |
def gen_fnsix(model_str, prompt):
|
86 |
if model_str == 'NA':
|
87 |
return None
|
88 |
+
noisesix = str(randint(1941, 2023))
|
89 |
return models_load[model_str](f'{prompt} {noisesix}')
|
90 |
+
|
91 |
with gr.Blocks() as demo:
|
92 |
gr.HTML(
|
93 |
"""
|
|
|
104 |
"""
|
105 |
)
|
106 |
with gr.Tab('One Image'):
|
107 |
+
model_choice = gr.Dropdown(models, label=f'Choose a model from the {len(models)} available! Try clearing the box and typing on it to filter them!', value=models[0], filterable=True)
|
108 |
+
txt_input = gr.Textbox(label='Your prompt:')
|
109 |
|
110 |
max_imagesone = 1
|
111 |
+
num_imagesone = gr.Slider(1, max_imagesone, value=max_imagesone, step=1, label='Nobody gets to see this label so I can put here whatever I want!', visible=False)
|
112 |
|
113 |
gen_button = gr.Button('Generate')
|
|
|
|
|
114 |
|
115 |
with gr.Row():
|
116 |
+
output = [gr.Image(label='') for _ in range(max_imagesone)]
|
117 |
|
118 |
for i, o in enumerate(output):
|
119 |
+
img_in = gr.Number(i, visible=False)
|
120 |
+
num_imagesone.change(lambda i, n: gr.update(visible=(i < n)), [img_in, num_imagesone], o, show_progress=False)
|
121 |
gen_event = gen_button.click(lambda i, n, m, t: gen_fn(m, t) if (i < n) else None, [img_in, num_imagesone, model_choice, txt_input], o, concurrency_limit=None, queue=False)
|
122 |
+
|
123 |
with gr.Row():
|
124 |
gr.HTML(
|
125 |
"""
|
|
|
129 |
"""
|
130 |
)
|
131 |
with gr.Tab('Seed it!'):
|
132 |
+
model_choiceseed = gr.Dropdown(models, label=f'Choose a model from the {len(models)} available! Try clearing the box and typing on it to filter them!', value=models[0], filterable=True)
|
133 |
+
txt_inputseed = gr.Textbox(label='Your prompt:')
|
134 |
seed = gr.Slider(label="Use a seed to replicate the same image later", info="Max 3999999999", minimum=0, maximum=MAX_SEED, step=1, value=1)
|
135 |
|
136 |
max_imagesseed = 1
|
137 |
+
num_imagesseed = gr.Slider(1, max_imagesone, value=max_imagesone, step=1, label='One, because more would make it produce identical images with the seed', visible=False)
|
138 |
|
139 |
gen_buttonseed = gr.Button('Generate an image using the seed')
|
|
|
|
|
140 |
|
141 |
with gr.Row():
|
142 |
+
outputseed = [gr.Image(label='') for _ in range(max_imagesseed)]
|
143 |
|
144 |
for i, o in enumerate(outputseed):
|
145 |
+
img_is = gr.Number(i, visible=False)
|
146 |
+
num_imagesseed.change(lambda i, n: gr.update(visible=(i < n)), [img_is, num_imagesseed], o, show_progress=False)
|
|
|
|
|
147 |
gen_eventseed = gr.on(triggers=[gen_buttonseed.click, txt_inputseed.submit],
|
148 |
fn=lambda i, n, m, t, n1: gen_fnseed(m, t, n1) if (i < n) else None,
|
149 |
inputs=[img_is, num_imagesseed, model_choiceseed, txt_inputseed, seed], outputs=[o],
|
150 |
+
concurrency_limit=None, queue=False)
|
151 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
152 |
with gr.Row():
|
153 |
gr.HTML(
|
154 |
"""
|
155 |
<div class="footer">
|
156 |
+
<p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen彼此
|
157 |
+
|
158 |
+
Key changes made:
|
159 |
+
1. Removed `HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None`
|
160 |
+
2. In `load_fn`, changed `m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)` to `m = gr_Interface_load(f'models/{model}')`
|
161 |
+
3. In `infer`, removed `token=HF_TOKEN` from the `asyncio.to_thread` call
|
162 |
+
|
163 |
+
This version assumes that the models loaded via `gr_Interface_load` don't require authentication. Make sure that:
|
164 |
+
1. The `all_models.py` file contains a list of publicly accessible models
|
165 |
+
2. The `externalmod.gr_Interface_load` function can handle loading models without a token
|
166 |
+
|
167 |
+
Note: If any of your models actually require authentication, you'll need to either:
|
168 |
+
- Use only public models
|
169 |
+
- Implement an alternative authentication method
|
170 |
+
- Keep the HF_TOKEN but handle it differently
|
171 |
|
172 |
+
Would you like me to explain any specific part of these changes in more detail?
|
|