Update app.py
app.py CHANGED
@@ -1,323 +1,473 @@
[Old version: 323 lines removed. Most of the old file did not survive extraction; only the fragments below are recoverable.]

 import os
-if use_lora:
-    pipe.load_lora_weights(lora)
-    pipe.fuse_lora(lora_scale)
 else:
-            prompt_2=prompt_2,
-            negative_prompt_2=negative_prompt_2,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale_base,
-            num_inference_steps=num_inference_steps_base,
-            generator=generator,
-            output_type="pil",
-        ).images[0]
 else:
 gr.HTML(
 )
-    gr.Markdown(DESCRIPTION, elem_id="description")
 with gr.Group():
-        use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
-        negative_prompt = gr.Text(
-            placeholder="Input Negative Prompt",
-            label="Negative prompt",
-            max_lines=1,
-            visible=False,
-        )
-        prompt_2 = gr.Text(
-            placeholder="Input Prompt 2",
-            label="Prompt 2",
-            max_lines=1,
-            visible=False,
-        )
-        negative_prompt_2 = gr.Text(
-            placeholder="Input Negative Prompt 2",
-            label="Negative prompt 2",
-            max_lines=1,
-            visible=False,
-        )
-        guidance_scale_base = gr.Slider(
-            info="Scale for classifier-free guidance",
-            label="Guidance scale",
-            minimum=1,
-            maximum=20,
-            step=0.1,
-            value=5.0,
 )
-            maximum=100,
             step=1,
-        )
-        with gr.Row():
-            strength_img2img = gr.Slider(
-                info="Strength for Img2Img",
-                label="Strength",
-                minimum=0,
-                maximum=1,
-                step=0.01,
-                value=0.7,
 )
-    gr.on(
-        triggers=[
-            prompt.submit,
-            negative_prompt.submit,
-            prompt_2.submit,
-            negative_prompt_2.submit,
-            run_button.click,
-        ],
-        fn=randomize_seed_fn,
-        inputs=[seed, randomize_seed],
-        outputs=seed,
-        queue=False,
-        api_name=False,
-    ).then(
-        fn=generate,
-        inputs=[
-            prompt,
-            negative_prompt,
-            prompt_2,
-            negative_prompt_2,
-            use_negative_prompt,
-            use_prompt_2,
-            use_negative_prompt_2,
-            seed,
-            width,
-            height,
-            guidance_scale_base,
-            num_inference_steps_base,
-            strength_img2img,
-            use_vae,
-            use_lora,
-            model,
-            vaecall,
-            lora,
-            lora_scale,
-            use_img2img,
-            url,
-        ],
-        outputs=result,
-        api_name="run",
-    )
-
-demo.queue(default_concurrency_limit=10).launch()
+import gradio as gr
+
+from diffusers import DiffusionPipeline, LCMScheduler
+import torch
+
+import base64
+from io import BytesIO
 import os
+import gc
+import warnings
+
+# Only used when MULTI_GPU is set to True
+from helper import UNetDataParallel
+from share_btn import community_icon_html, loading_icon_html, share_js
+
+# SDXL code: https://github.com/huggingface/diffusers/pull/3859
+
+# Process environment variables
+# Use `segmind/SSD-1B` (distilled SDXL) for faster generation.
+use_ssd = os.getenv("USE_SSD", "false").lower() == "true"
+if use_ssd:
+    model_key_base = "segmind/SSD-1B"
+    model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-1.0"
+    lcm_lora_id = "latent-consistency/lcm-lora-ssd-1b"
+else:
+    model_key_base = "stabilityai/stable-diffusion-xl-base-1.0"
+    model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-1.0"
+    lcm_lora_id = "latent-consistency/lcm-lora-sdxl"
+
+# Use LCM LoRA (enabled by default)
+if "ENABLE_LCM" not in os.environ:
+    warnings.warn("`ENABLE_LCM` environment variable is not set. LCM LoRA will be loaded and the refiner disabled by default. Set `ENABLE_LCM=false` to turn off LCM LoRA.")
+
+enable_lcm = os.getenv("ENABLE_LCM", "true").lower() == "true"
+# Use the refiner (disabled by default if LCM is enabled)
+enable_refiner = os.getenv("ENABLE_REFINER", "false" if enable_lcm or use_ssd else "true").lower() == "true"
+# Also output the intermediate images produced before the refiner pass
+output_images_before_refiner = os.getenv("OUTPUT_IMAGES_BEFORE_REFINER", "false").lower() == "true"
+
+offload_base = os.getenv("OFFLOAD_BASE", "false").lower() == "true"
+offload_refiner = os.getenv("OFFLOAD_REFINER", "true").lower() == "true"
+
+# How many images to generate by default
+default_num_images = int(os.getenv("DEFAULT_NUM_IMAGES", "4"))
+if default_num_images < 1:
+    default_num_images = 1
+
+# Create a public link
+share = os.getenv("SHARE", "false").lower() == "true"
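+
+# Example invocation (hypothetical values): serve the distilled SSD-1B model
+# with a public link, generating 2 images per request by default:
+#   USE_SSD=true SHARE=true DEFAULT_NUM_IMAGES=2 python app.py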
+
+print("Loading model", model_key_base)
+pipe = DiffusionPipeline.from_pretrained(model_key_base, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
+
+if enable_lcm:
+    pipe.load_lora_weights(lcm_lora_id)
+    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
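+    # LCM sampling pairs the distilled LoRA weights with the dedicated
+    # LCMScheduler; that combination is what makes very low step counts work.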
+
+multi_gpu = os.getenv("MULTI_GPU", "false").lower() == "true"
+
+if multi_gpu:
+    pipe.unet = UNetDataParallel(pipe.unet)
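+    # DataParallel hides the wrapped module's attributes behind `.module`;
+    # re-expose the ones the diffusers pipeline reads directly.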
+    pipe.unet.config, pipe.unet.dtype, pipe.unet.add_embedding = pipe.unet.module.config, pipe.unet.module.dtype, pipe.unet.module.add_embedding
+    pipe.to("cuda")
+else:
+    if offload_base:
+        pipe.enable_model_cpu_offload()
+    else:
+        pipe.to("cuda")
+
+# if using torch < 2.0
+# pipe.enable_xformers_memory_efficient_attention()
+
+# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+if enable_refiner:
+    print("Loading model", model_key_refiner)
+    pipe_refiner = DiffusionPipeline.from_pretrained(model_key_refiner, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
+    if multi_gpu:
+        pipe_refiner.unet = UNetDataParallel(pipe_refiner.unet)
+        pipe_refiner.unet.config, pipe_refiner.unet.dtype, pipe_refiner.unet.add_embedding = pipe_refiner.unet.module.config, pipe_refiner.unet.module.dtype, pipe_refiner.unet.module.add_embedding
+        pipe_refiner.to("cuda")
+    else:
+        if offload_refiner:
+            pipe_refiner.enable_model_cpu_offload()
         else:
+            pipe_refiner.to("cuda")
+
+    # if using torch < 2.0
+    # pipe_refiner.enable_xformers_memory_efficient_attention()
+
+    # pipe_refiner.unet = torch.compile(pipe_refiner.unet, mode="reduce-overhead", fullgraph=True)
+
+# NOTE: we do not have word list filtering in this gradio demo
+
+is_gpu_busy = False
+def infer(prompt, negative, scale, samples=4, steps=50, refiner_strength=0.3, seed=-1):
+    prompt, negative = [prompt] * samples, [negative] * samples
+
+    g = torch.Generator(device="cuda")
+    if seed != -1:
+        g.manual_seed(seed)
     else:
+        g.seed()
+
+    images_b64_list = []
+
+    if not enable_refiner or output_images_before_refiner:
+        images = pipe(prompt=prompt, negative_prompt=negative, guidance_scale=scale, num_inference_steps=steps, generator=g).images
+    else:
+        # This skips the decoding and re-encoding for refinement.
+        images = pipe(prompt=prompt, negative_prompt=negative, guidance_scale=scale, num_inference_steps=steps, output_type="latent", generator=g).images
+
+    gc.collect()
+    torch.cuda.empty_cache()
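+    # Release the base pass's cached CUDA memory before the refiner (if enabled) runs.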
+
+    if enable_refiner:
+        if output_images_before_refiner:
+            for image in images:
+                buffered = BytesIO()
+                image.save(buffered, format="JPEG")
+                img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+                image_b64 = f"data:image/jpeg;base64,{img_str}"
+                images_b64_list.append(image_b64)
+
+        images = pipe_refiner(prompt=prompt, negative_prompt=negative, image=images, num_inference_steps=steps, strength=refiner_strength, generator=g).images
+
+        gc.collect()
+        torch.cuda.empty_cache()
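+
+    # Encode outputs as JPEG data URIs; the event handlers below pass
+    # postprocess=False, so the gallery receives these strings unchanged.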
+
+    for image in images:
+        buffered = BytesIO()
+        image.save(buffered, format="JPEG")
+        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+        image_b64 = f"data:image/jpeg;base64,{img_str}"
+        images_b64_list.append(image_b64)
+
+    return images_b64_list
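+
+# Hypothetical direct call, bypassing the UI (returns a list of JPEG data URIs):
+#   uris = infer("a red bicycle", "low quality", scale=1 if enable_lcm else 9, samples=1)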
+
+# Reference: https://huggingface.co/spaces/google/sdxl/blob/main/app.py#L139
+css = """
+.gradio-container {
+    font-family: 'IBM Plex Sans', sans-serif;
+}
+.gr-button {
+    color: white;
+    border-color: black;
+    background: black;
+}
+input[type='range'] {
+    accent-color: black;
+}
+.dark input[type='range'] {
+    accent-color: #dfdfdf;
+}
+.gradio-container {
+    max-width: 730px !important;
+    margin: auto;
+    padding-top: 1.5rem;
+}
+#gallery {
+    min-height: 22rem;
+    margin-bottom: 15px;
+    margin-left: auto;
+    margin-right: auto;
+    border-bottom-right-radius: .5rem !important;
+    border-bottom-left-radius: .5rem !important;
+}
+#gallery>div>.h-full {
+    min-height: 20rem;
+}
+.details:hover {
+    text-decoration: underline;
+}
+.gr-button {
+    white-space: nowrap;
+}
+.gr-button:focus {
+    border-color: rgb(147 197 253 / var(--tw-border-opacity));
+    outline: none;
+    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+    --tw-border-opacity: 1;
+    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
+    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
+    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
+    --tw-ring-opacity: .5;
+}
+#advanced-btn {
+    font-size: .7rem !important;
+    line-height: 19px;
+    margin-top: 12px;
+    margin-bottom: 12px;
+    padding: 2px 8px;
+    border-radius: 14px !important;
+}
+#advanced-options {
+    display: none;
+    margin-bottom: 20px;
+}
+.footer {
+    margin-bottom: 45px;
+    margin-top: 35px;
+    text-align: center;
+    border-bottom: 1px solid #e5e5e5;
+}
+.footer>p {
+    font-size: .8rem;
+    display: inline-block;
+    padding: 10px 10px;
+    transform: translateY(10px);
+    background: white;
+}
+.dark .footer {
+    border-color: #303030;
+}
+.dark .footer>p {
+    background: #0b0f19;
+}
+.acknowledgments h4 {
+    margin: 1.25em 0 .25em 0;
+    font-weight: bold;
+    font-size: 115%;
+}
+.animate-spin {
+    animation: spin 1s linear infinite;
+}
+@keyframes spin {
+    from {
+        transform: rotate(0deg);
+    }
+    to {
+        transform: rotate(360deg);
+    }
+}
+#share-btn-container {
+    display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+    margin-top: 10px;
+    margin-left: auto;
+}
+#share-btn {
+    all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; right: 0;
+}
+#share-btn * {
+    all: unset;
+}
+#share-btn-container div:nth-child(-n+2) {
+    width: auto !important;
+    min-height: 0px !important;
+}
+#share-btn-container .wrap {
+    display: none !important;
+}
+
+.gr-form {
+    flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
+}
+#prompt-container {
+    gap: 0;
+    margin: 0 10px 0 0;
+}
+#generate-image-btn {
+    margin: 0 0 0 10px;
+}
+#prompt-text-input, #negative-prompt-text-input {padding: .45rem 0.625rem}
+#component-16 {border-top-width: 1px !important; margin-top: 1em}
+.image_duplication {position: absolute; width: 100px; left: 50px}
+"""
+
+block = gr.Blocks(css=css)
+
+default_guidance_scale = 1 if enable_lcm else 9
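+# LCM-distilled weights are trained to sample with little or no classifier-free
+# guidance, hence the much lower default scale when LCM is enabled.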
+
+examples = [
+    [
+        'A high tech solarpunk utopia in the Amazon rainforest',
+        'low quality',
+        default_guidance_scale
+    ],
+    [
+        'A pikachu fine dining with a view to the Eiffel Tower',
+        'low quality',
+        default_guidance_scale
+    ],
+    [
+        'A mecha robot in a favela in expressionist style',
+        'low quality, 3d, photorealistic',
+        default_guidance_scale
+    ],
+    [
+        'an insect robot preparing a delicious meal',
+        'low quality, illustration',
+        default_guidance_scale
+    ],
+    [
+        "A small cabin on top of a snowy mountain in the style of Disney, artstation",
+        'low quality, ugly',
+        default_guidance_scale
+    ],
+]
+
+
+with block:
     gr.HTML(
+        f"""
+            <div style="text-align: center; margin: 0 auto;">
+              <div
+                style="
+                  display: inline-flex;
+                  align-items: center;
+                  gap: 0.8rem;
+                  font-size: 1.75rem;
+                "
+              >
+                <svg
+                  width="0.65em"
+                  height="0.65em"
+                  viewBox="0 0 115 115"
+                  fill="none"
+                  xmlns="http://www.w3.org/2000/svg"
+                >
+                  <rect width="23" height="23" fill="white"></rect>
+                  <rect y="69" width="23" height="23" fill="white"></rect>
+                  <rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
+                  <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
+                  <rect x="46" width="23" height="23" fill="white"></rect>
+                  <rect x="46" y="69" width="23" height="23" fill="white"></rect>
+                  <rect x="69" width="23" height="23" fill="black"></rect>
+                  <rect x="69" y="69" width="23" height="23" fill="black"></rect>
+                  <rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
+                  <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
+                  <rect x="115" y="46" width="23" height="23" fill="white"></rect>
+                  <rect x="115" y="115" width="23" height="23" fill="white"></rect>
+                  <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
+                  <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
+                  <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
+                  <rect x="92" y="69" width="23" height="23" fill="white"></rect>
+                  <rect x="69" y="46" width="23" height="23" fill="white"></rect>
+                  <rect x="69" y="115" width="23" height="23" fill="white"></rect>
+                  <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
+                  <rect x="46" y="46" width="23" height="23" fill="black"></rect>
+                  <rect x="46" y="115" width="23" height="23" fill="black"></rect>
+                  <rect x="46" y="69" width="23" height="23" fill="black"></rect>
+                  <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
+                  <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
+                  <rect x="23" y="69" width="23" height="23" fill="black"></rect>
+                </svg>
+                <h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
+                  Stable Diffusion XL 1.0 Demo
+                </h1>
+              </div>
+              <p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
+                Stable Diffusion XL 1.0 is the latest text-to-image model from StabilityAI.
+                <br/>
+                The source code of this space is at
+                <a
+                  href="https://github.com/TonyLianLong/stable-diffusion-xl-demo"
+                  style="text-decoration: underline;"
+                  target="_blank"
+                >TonyLianLong/stable-diffusion-xl-demo</a>.
+              </p>
+            </div>
+        """
     )
     with gr.Group():
+        with gr.Box():
+            with gr.Row(elem_id="prompt-container", equal_height=True, style=dict(mobile_collapse=False)):
+                with gr.Column():
+                    text = gr.Textbox(
+                        label="Enter your prompt",
+                        show_label=False,
+                        max_lines=1,
+                        placeholder="Enter your prompt",
+                        elem_id="prompt-text-input",
+                    ).style(
+                        border=(True, False, True, True),
+                        rounded=(True, False, False, True),
+                        container=False,
+                    )
+                    negative = gr.Textbox(
+                        label="Enter your negative prompt",
+                        show_label=False,
+                        max_lines=1,
+                        placeholder="Enter a negative prompt",
+                        elem_id="negative-prompt-text-input",
+                    ).style(
+                        border=(True, False, True, True),
+                        rounded=(True, False, False, True),
+                        container=False,
+                    )
+                btn = gr.Button("Generate image", elem_id="generate-image-btn").style(
+                    rounded=(False, True, True, False),
+                    full_width=False,
+                )

+        gallery = gr.Gallery(
+            label="Generated images", show_label=False, elem_id="gallery"
+        ).style(grid=[2], height="auto")
+
+        with gr.Group(elem_id="container-advanced-btns"):
+            #advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
+            with gr.Group(elem_id="share-btn-container"):
+                community_icon = gr.HTML(community_icon_html)
+                loading_icon = gr.HTML(loading_icon_html)
+                share_button = gr.Button("Share to community", elem_id="share-btn")
+
+        with gr.Accordion("Advanced settings", open=False):
+            # gr.Markdown("Advanced settings are temporarily unavailable")
+            samples = gr.Slider(label="Images", minimum=1, maximum=max(16 if enable_lcm else 4, default_num_images), value=default_num_images, step=1)
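+            # LCM converges in very few denoising steps, so its step slider gets a
+            # much lower ceiling (values around 2-8 are typical).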
+            if enable_lcm:
+                steps = gr.Slider(label="Steps", minimum=1, maximum=10, value=4, step=1)
+            else:
+                steps = gr.Slider(label="Steps", minimum=1, maximum=250, value=50, step=1)
+
+            if enable_refiner:
+                refiner_strength = gr.Slider(label="Refiner Strength", minimum=0, maximum=1.0, value=0.3, step=0.1)
+            else:
+                refiner_strength = gr.Slider(label="Refiner Strength (refiner not enabled)", minimum=0, maximum=0, value=0, step=0)
+            guidance_scale = gr.Slider(
+                label="Guidance Scale", minimum=0, maximum=50, value=default_guidance_scale, step=0.1
+            )
+
+            seed = gr.Slider(
+                label="Seed",
+                minimum=-1,
+                maximum=2147483647,
                 step=1,
+                randomize=True,
+            )

+    ex = gr.Examples(examples=examples, fn=infer, inputs=[text, negative, guidance_scale], outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False)
+    ex.dataset.headers = [""]
+    negative.submit(infer, inputs=[text, negative, guidance_scale, samples, steps, refiner_strength, seed], outputs=[gallery], postprocess=False)
+    text.submit(infer, inputs=[text, negative, guidance_scale, samples, steps, refiner_strength, seed], outputs=[gallery], postprocess=False)
+    btn.click(infer, inputs=[text, negative, guidance_scale, samples, steps, refiner_strength, seed], outputs=[gallery], postprocess=False)
+
+    #advanced_button.click(
+    #    None,
+    #    [],
+    #    text,
+    #    _js="""
+    #    () => {
+    #        const options = document.querySelector("body > gradio-app").querySelector("#advanced-options");
+    #        options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none";
+    #    }""",
+    #)
+    share_button.click(
+        None,
+        [],
+        [],
+        _js=share_js,
+    )
+    gr.HTML(
+        f"""
+            <div class="footer">
+                <p>
+                    This space uses the {model_key_base} model{" with " + lcm_lora_id + " LCM LoRA" if enable_lcm else ""}. Gradio demo by 🤗 Hugging Face and <a style="text-decoration: underline;" href="https://tonylian.com/">Long (Tony) Lian</a>. <br/>
+                </p>
+            </div>
+        """
+    )
+    with gr.Accordion(label="License", open=False):
+        gr.HTML(
+            """<div class="acknowledgments">
+            <p><h4>LICENSE</h4>
+            The SDXL 1.0 model is licensed under the <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md" style="text-decoration: underline;" target="_blank">Stability AI CreativeML Open RAIL++-M</a> license. The license allows users to take advantage of the model in a wide range of settings (including free use and redistribution) as long as they respect the specific use-case restrictions outlined, which correspond to model applications the licensor deems ill-suited for the model or likely to cause harm. For the full list of restrictions, please <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md" style="text-decoration: underline;" target="_blank">read the license</a>.
+            <p><h4>Biases and content acknowledgment</h4>
+            As impressive as turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography, and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" style="text-decoration: underline;" target="_blank">model card</a>.</p>
+            </div>
+            """
+        )

+block.queue().launch(share=share)