craftgamesnetwork committed on
Commit e9a2ae7 · verified · 1 Parent(s): a1c8f21

Update app.py

Files changed (1)
  1. app.py +458 -308
app.py CHANGED
@@ -1,323 +1,473 @@
- #!/usr/bin/env python
-
- from __future__ import annotations
-
- import requests
  import os
- import random
-
- import gradio as gr
- import numpy as np
- import spaces
- import torch
- from PIL import Image
- from io import BytesIO
- from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image
-
- DESCRIPTION = "# Run any LoRA or SD Model"
- if not torch.cuda.is_available():
-     DESCRIPTION += "\n<p>⚠️ This space is running on the CPU. This demo doesn't work on CPU 😞! Run on a GPU by duplicating this space, or try our website, free and unlimited, by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
- ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
- ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
- ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_VAE", "1") == "1"
-
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
-
- @spaces.GPU
- def generate(
-     prompt: str,
-     negative_prompt: str = "",
-     prompt_2: str = "",
-     negative_prompt_2: str = "",
-     use_negative_prompt: bool = False,
-     use_prompt_2: bool = False,
-     use_negative_prompt_2: bool = False,
-     seed: int = 0,
-     width: int = 1024,
-     height: int = 1024,
-     guidance_scale_base: float = 5.0,
-     num_inference_steps_base: int = 25,
-     strength_img2img: float = 0.7,
-     use_vae: bool = False,
-     use_lora: bool = False,
-     model='stabilityai/stable-diffusion-xl-base-1.0',
-     vaecall='madebyollin/sdxl-vae-fp16-fix',
-     lora='',
-     lora_scale: float = 0.7,
-     use_img2img: bool = False,
-     url='',
- ):
-     if torch.cuda.is_available():
-
-         if not use_img2img:
-             pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
-             pipe.enable_model_cpu_offload()
-
-             if use_vae:
-                 vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                 pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
-                 pipe.enable_model_cpu_offload()
-
-         if use_img2img:
-             pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
-             pipe.enable_model_cpu_offload()
-
-             if use_vae:
-                 vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                 pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
-                 pipe.enable_model_cpu_offload()
-
-             response = requests.get(url)
-             init_image = Image.open(BytesIO(response.content)).convert("RGB")
-             init_image = init_image.resize((width, height))
-
-         if use_lora:
-             pipe.load_lora_weights(lora)
-             pipe.fuse_lora(lora_scale)
-
      else:
-         pipe.to(device)
-
-
-     generator = torch.Generator().manual_seed(seed)
-
-     if not use_negative_prompt:
-         negative_prompt = None  # type: ignore
-     if not use_prompt_2:
-         prompt_2 = None  # type: ignore
-     if not use_negative_prompt_2:
-         negative_prompt_2 = None  # type: ignore
-
-     if not use_img2img:
-         return pipe(
-             prompt=prompt,
-             negative_prompt=negative_prompt,
-             prompt_2=prompt_2,
-             negative_prompt_2=negative_prompt_2,
-             width=width,
-             height=height,
-             guidance_scale=guidance_scale_base,
-             num_inference_steps=num_inference_steps_base,
-             generator=generator,
-             output_type="pil",
-         ).images[0]
      else:
-         images = pipe(
-             prompt=prompt,
-             image=init_image,
-             strength=strength_img2img,
-             negative_prompt=negative_prompt,
-             prompt_2=prompt_2,
-             negative_prompt_2=negative_prompt_2,
-             width=width,
-             height=height,
-             guidance_scale=guidance_scale_base,
-             num_inference_steps=num_inference_steps_base,
-             generator=generator,
-             output_type="pil",
-         ).images[0]
-         return images
-
- with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
      gr.HTML(
-         "<p><center>📙 For any additional support, join our <a href='https://discord.gg/JprjXpjt9K'>Discord</a></center></p>"
      )
-     gr.Markdown(DESCRIPTION, elem_id="description")
      with gr.Group():
-         model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
-         vaecall = gr.Text(label='VAE', placeholder='e.g. madebyollin/sdxl-vae-fp16-fix')
-         lora = gr.Text(label='LoRA', placeholder='e.g. nerijs/pixel-art-xl')
-         lora_scale = gr.Slider(
-             info="The closer to 1, the more the output will resemble the LoRA, but artifacts may become visible.",
-             label="Lora Scale",
-             minimum=0.01,
-             maximum=1,
-             step=0.01,
-             value=0.7,
-         )
-         url = gr.Text(label='URL (Img2Img)', placeholder='e.g. https://example.com/image.png')
-         with gr.Row():
-             prompt = gr.Text(
-                 placeholder="Input prompt",
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-     result = gr.Image(label="Result", show_label=False)
-     with gr.Accordion("Advanced options", open=False):
-         with gr.Row():
-             use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
-             use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
-             use_lora = gr.Checkbox(label='Use Lora', value=False, visible=ENABLE_USE_LORA)
-             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
-             use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
-             use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
-         negative_prompt = gr.Text(
-             placeholder="Input Negative Prompt",
-             label="Negative prompt",
-             max_lines=1,
-             visible=False,
-         )
-         prompt_2 = gr.Text(
-             placeholder="Input Prompt 2",
-             label="Prompt 2",
-             max_lines=1,
-             visible=False,
-         )
-         negative_prompt_2 = gr.Text(
-             placeholder="Input Negative Prompt 2",
-             label="Negative prompt 2",
-             max_lines=1,
-             visible=False,
-         )

-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         with gr.Row():
-             width = gr.Slider(
-                 label="Width",
-                 minimum=256,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=32,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=256,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=32,
-                 value=1024,
-             )
-
-         with gr.Row():
-             guidance_scale_base = gr.Slider(
-                 info="Scale for classifier-free guidance",
-                 label="Guidance scale",
-                 minimum=1,
-                 maximum=20,
-                 step=0.1,
-                 value=5.0,
      )
-         with gr.Row():
-             num_inference_steps_base = gr.Slider(
-                 info="Number of denoising steps",
-                 label="Number of inference steps",
-                 minimum=10,
-                 maximum=100,
      step=1,
-                 value=25,
-             )
-         with gr.Row():
-             strength_img2img = gr.Slider(
-                 info="Strength for Img2Img",
-                 label="Strength",
-                 minimum=0,
-                 maximum=1,
-                 step=0.01,
-                 value=0.7,
      )

-     use_negative_prompt.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_negative_prompt,
-         outputs=negative_prompt,
-         queue=False,
-         api_name=False,
-     )
-     use_prompt_2.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_prompt_2,
-         outputs=prompt_2,
-         queue=False,
-         api_name=False,
-     )
-     use_negative_prompt_2.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_negative_prompt_2,
-         outputs=negative_prompt_2,
-         queue=False,
-         api_name=False,
-     )
-     use_vae.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_vae,
-         outputs=vaecall,
-         queue=False,
-         api_name=False,
-     )
-     use_lora.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_lora,
-         outputs=lora,
-         queue=False,
-         api_name=False,
-     )
-     use_img2img.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_img2img,
-         outputs=url,
-         queue=False,
-         api_name=False,
-     )
-
-     gr.on(
-         triggers=[
-             prompt.submit,
-             negative_prompt.submit,
-             prompt_2.submit,
-             negative_prompt_2.submit,
-             run_button.click,
-         ],
-         fn=randomize_seed_fn,
-         inputs=[seed, randomize_seed],
-         outputs=seed,
-         queue=False,
-         api_name=False,
-     ).then(
-         fn=generate,
-         inputs=[
-             prompt,
-             negative_prompt,
-             prompt_2,
-             negative_prompt_2,
-             use_negative_prompt,
-             use_prompt_2,
-             use_negative_prompt_2,
-             seed,
-             width,
-             height,
-             guidance_scale_base,
-             num_inference_steps_base,
-             strength_img2img,
-             use_vae,
-             use_lora,
-             model,
-             vaecall,
-             lora,
-             lora_scale,
-             use_img2img,
-             url,
-         ],
-         outputs=result,
-         api_name="run",
-     )

- if __name__ == "__main__":
-     demo.queue(default_concurrency_limit=10).launch()
 
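The removed version above rebuilt its Diffusers pipeline from scratch inside every `generate()` call. A condensed sketch of that per-request text-to-image path, reduced to its default branch (model id and sampling defaults taken from the removed code; this sketch is illustrative and not part of the commit), makes the cost the rewrite eliminates easier to see:

import torch
from diffusers import DiffusionPipeline

def generate(prompt: str, seed: int = 0,
             model: str = "stabilityai/stable-diffusion-xl-base-1.0"):
    # Re-initializing the pipeline here, on every request, is the overhead
    # the new version below removes by loading models once at startup.
    pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
    pipe.enable_model_cpu_offload()
    generator = torch.Generator().manual_seed(seed)
    # Sample with the same defaults the old UI exposed.
    return pipe(prompt=prompt, guidance_scale=5.0, num_inference_steps=25,
                generator=generator, output_type="pil").images[0]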
+ import gradio as gr
+
+ from diffusers import DiffusionPipeline, LCMScheduler
+ import torch
+
+ import base64
+ from io import BytesIO
  import os
+ import gc
+ import warnings
+
+ # Only used when MULTI_GPU is set to True
+ from helper import UNetDataParallel
+ from share_btn import community_icon_html, loading_icon_html, share_js
+
+ # SDXL code: https://github.com/huggingface/diffusers/pull/3859
+
+ # Process environment variables
+ # Use `segmind/SSD-1B` (distilled SDXL) for faster generation.
+ use_ssd = os.getenv("USE_SSD", "false").lower() == "true"
+ if use_ssd:
+     model_key_base = "segmind/SSD-1B"
+     model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-1.0"
+     lcm_lora_id = "latent-consistency/lcm-lora-ssd-1b"
+ else:
+     model_key_base = "stabilityai/stable-diffusion-xl-base-1.0"
+     model_key_refiner = "stabilityai/stable-diffusion-xl-refiner-1.0"
+     lcm_lora_id = "latent-consistency/lcm-lora-sdxl"
+
+ # Use LCM LoRA (enabled by default)
+ if "ENABLE_LCM" not in os.environ:
+     warnings.warn("The `ENABLE_LCM` environment variable is not set, so the LCM LoRA will be loaded and the refiner disabled by default. Set it to `false` to turn off LCM LoRA.")
+
+ enable_lcm = os.getenv("ENABLE_LCM", "true").lower() == "true"
+ # Use the refiner (disabled by default if LCM is enabled)
+ enable_refiner = os.getenv("ENABLE_REFINER", "false" if enable_lcm or use_ssd else "true").lower() == "true"
+ # Output images both before and after the refiner
+ output_images_before_refiner = os.getenv("OUTPUT_IMAGES_BEFORE_REFINER", "false").lower() == "true"
+
+ offload_base = os.getenv("OFFLOAD_BASE", "false").lower() == "true"
+ offload_refiner = os.getenv("OFFLOAD_REFINER", "true").lower() == "true"
+
+ # How many images to generate by default
+ default_num_images = int(os.getenv("DEFAULT_NUM_IMAGES", "4"))
+ if default_num_images < 1:
+     default_num_images = 1
+
+ # Create a public link
+ share = os.getenv("SHARE", "false").lower() == "true"
+
+ print("Loading model", model_key_base)
+ pipe = DiffusionPipeline.from_pretrained(model_key_base, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
+
+ if enable_lcm:
+     pipe.load_lora_weights(lcm_lora_id)
+     pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+
+ multi_gpu = os.getenv("MULTI_GPU", "false").lower() == "true"
+
+ if multi_gpu:
+     pipe.unet = UNetDataParallel(pipe.unet)
+     pipe.unet.config, pipe.unet.dtype, pipe.unet.add_embedding = pipe.unet.module.config, pipe.unet.module.dtype, pipe.unet.module.add_embedding
+     pipe.to("cuda")
+ else:
+     if offload_base:
+         pipe.enable_model_cpu_offload()
+     else:
+         pipe.to("cuda")
+
+ # if using torch < 2.0
+ # pipe.enable_xformers_memory_efficient_attention()
+
+ # pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+
+ if enable_refiner:
+     print("Loading model", model_key_refiner)
+     pipe_refiner = DiffusionPipeline.from_pretrained(model_key_refiner, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
+     if multi_gpu:
+         pipe_refiner.unet = UNetDataParallel(pipe_refiner.unet)
+         pipe_refiner.unet.config, pipe_refiner.unet.dtype, pipe_refiner.unet.add_embedding = pipe_refiner.unet.module.config, pipe_refiner.unet.module.dtype, pipe_refiner.unet.module.add_embedding
+         pipe_refiner.to("cuda")
+     else:
+         if offload_refiner:
+             pipe_refiner.enable_model_cpu_offload()
          else:
+             pipe_refiner.to("cuda")
+
+ # if using torch < 2.0
+ # pipe_refiner.enable_xformers_memory_efficient_attention()
+
+ # pipe_refiner.unet = torch.compile(pipe_refiner.unet, mode="reduce-overhead", fullgraph=True)
+
+ # NOTE: we do not have word-list filtering in this gradio demo
+
+ is_gpu_busy = False
+ def infer(prompt, negative, scale, samples=4, steps=50, refiner_strength=0.3, seed=-1):
+     prompt, negative = [prompt] * samples, [negative] * samples
+
+     g = torch.Generator(device="cuda")
+     if seed != -1:
+         g.manual_seed(seed)
      else:
+         g.seed()
+
+     images_b64_list = []
+
+     if not enable_refiner or output_images_before_refiner:
+         images = pipe(prompt=prompt, negative_prompt=negative, guidance_scale=scale, num_inference_steps=steps, generator=g).images
+     else:
+         # This skips the decoding and re-encoding for refinement.
+         images = pipe(prompt=prompt, negative_prompt=negative, guidance_scale=scale, num_inference_steps=steps, output_type="latent", generator=g).images
+
+     gc.collect()
+     torch.cuda.empty_cache()
+
+     if enable_refiner:
+         if output_images_before_refiner:
+             for image in images:
+                 buffered = BytesIO()
+                 image.save(buffered, format="JPEG")
+                 img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+                 image_b64 = f"data:image/jpeg;base64,{img_str}"
+                 images_b64_list.append(image_b64)
+
+         images = pipe_refiner(prompt=prompt, negative_prompt=negative, image=images, num_inference_steps=steps, strength=refiner_strength, generator=g).images
+
+         gc.collect()
+         torch.cuda.empty_cache()
+
+     for image in images:
+         buffered = BytesIO()
+         image.save(buffered, format="JPEG")
+         img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+         image_b64 = f"data:image/jpeg;base64,{img_str}"
+         images_b64_list.append(image_b64)
+
+     return images_b64_list
+
+ # Reference: https://huggingface.co/spaces/google/sdxl/blob/main/app.py#L139
+ css = """
+ .gradio-container {
+     font-family: 'IBM Plex Sans', sans-serif;
+ }
+ .gr-button {
+     color: white;
+     border-color: black;
+     background: black;
+ }
+ input[type='range'] {
+     accent-color: black;
+ }
+ .dark input[type='range'] {
+     accent-color: #dfdfdf;
+ }
+ .gradio-container {
+     max-width: 730px !important;
+     margin: auto;
+     padding-top: 1.5rem;
+ }
+ #gallery {
+     min-height: 22rem;
+     margin-bottom: 15px;
+     margin-left: auto;
+     margin-right: auto;
+     border-bottom-right-radius: .5rem !important;
+     border-bottom-left-radius: .5rem !important;
+ }
+ #gallery>div>.h-full {
+     min-height: 20rem;
+ }
+ .details:hover {
+     text-decoration: underline;
+ }
+ .gr-button {
+     white-space: nowrap;
+ }
+ .gr-button:focus {
+     border-color: rgb(147 197 253 / var(--tw-border-opacity));
+     outline: none;
+     box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+     --tw-border-opacity: 1;
+     --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
+     --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
+     --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
+     --tw-ring-opacity: .5;
+ }
+ #advanced-btn {
+     font-size: .7rem !important;
+     line-height: 19px;
+     margin-top: 12px;
+     margin-bottom: 12px;
+     padding: 2px 8px;
+     border-radius: 14px !important;
+ }
+ #advanced-options {
+     display: none;
+     margin-bottom: 20px;
+ }
+ .footer {
+     margin-bottom: 45px;
+     margin-top: 35px;
+     text-align: center;
+     border-bottom: 1px solid #e5e5e5;
+ }
+ .footer>p {
+     font-size: .8rem;
+     display: inline-block;
+     padding: 10px 10px;
+     transform: translateY(10px);
+     background: white;
+ }
+ .dark .footer {
+     border-color: #303030;
+ }
+ .dark .footer>p {
+     background: #0b0f19;
+ }
+ .acknowledgments h4 {
+     margin: 1.25em 0 .25em 0;
+     font-weight: bold;
+     font-size: 115%;
+ }
+ .animate-spin {
+     animation: spin 1s linear infinite;
+ }
+ @keyframes spin {
+     from {
+         transform: rotate(0deg);
+     }
+     to {
+         transform: rotate(360deg);
+     }
+ }
+ #share-btn-container {
+     display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+     margin-top: 10px;
+     margin-left: auto;
+ }
+ #share-btn {
+     all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; right: 0;
+ }
+ #share-btn * {
+     all: unset;
+ }
+ #share-btn-container div:nth-child(-n+2) {
+     width: auto !important;
+     min-height: 0px !important;
+ }
+ #share-btn-container .wrap {
+     display: none !important;
+ }
+
+ .gr-form {
+     flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
+ }
+ #prompt-container {
+     gap: 0;
+     margin: 0 10px 0 0;
+ }
+ #generate-image-btn {
+     margin: 0 0 0 10px;
+ }
+ #prompt-text-input, #negative-prompt-text-input {padding: .45rem 0.625rem}
+ #component-16 {border-top-width: 1px !important; margin-top: 1em}
+ .image_duplication {position: absolute; width: 100px; left: 50px}
+ """
+
+ block = gr.Blocks(css=css)
+
+ default_guidance_scale = 1 if enable_lcm else 9
+
+ examples = [
+     [
+         'A high tech solarpunk utopia in the Amazon rainforest',
+         'low quality',
+         default_guidance_scale,
+     ],
+     [
+         'A pikachu fine dining with a view to the Eiffel Tower',
+         'low quality',
+         default_guidance_scale,
+     ],
+     [
+         'A mecha robot in a favela in expressionist style',
+         'low quality, 3d, photorealistic',
+         default_guidance_scale,
+     ],
+     [
+         'an insect robot preparing a delicious meal',
+         'low quality, illustration',
+         default_guidance_scale,
+     ],
+     [
+         "A small cabin on top of a snowy mountain in the style of Disney, artstation",
+         'low quality, ugly',
+         default_guidance_scale,
+     ],
+ ]
+
+
+ with block:
      gr.HTML(
+         f"""
+             <div style="text-align: center; margin: 0 auto;">
+               <div
+                 style="
+                   display: inline-flex;
+                   align-items: center;
+                   gap: 0.8rem;
+                   font-size: 1.75rem;
+                 "
+               >
+                 <svg
+                   width="0.65em"
+                   height="0.65em"
+                   viewBox="0 0 115 115"
+                   fill="none"
+                   xmlns="http://www.w3.org/2000/svg"
+                 >
+                   <rect width="23" height="23" fill="white"></rect>
+                   <rect y="69" width="23" height="23" fill="white"></rect>
+                   <rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
+                   <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
+                   <rect x="46" width="23" height="23" fill="white"></rect>
+                   <rect x="46" y="69" width="23" height="23" fill="white"></rect>
+                   <rect x="69" width="23" height="23" fill="black"></rect>
+                   <rect x="69" y="69" width="23" height="23" fill="black"></rect>
+                   <rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
+                   <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
+                   <rect x="115" y="46" width="23" height="23" fill="white"></rect>
+                   <rect x="115" y="115" width="23" height="23" fill="white"></rect>
+                   <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
+                   <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
+                   <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
+                   <rect x="92" y="69" width="23" height="23" fill="white"></rect>
+                   <rect x="69" y="46" width="23" height="23" fill="white"></rect>
+                   <rect x="69" y="115" width="23" height="23" fill="white"></rect>
+                   <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
+                   <rect x="46" y="46" width="23" height="23" fill="black"></rect>
+                   <rect x="46" y="115" width="23" height="23" fill="black"></rect>
+                   <rect x="46" y="69" width="23" height="23" fill="black"></rect>
+                   <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
+                   <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
+                   <rect x="23" y="69" width="23" height="23" fill="black"></rect>
+                 </svg>
+                 <h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
+                   Stable Diffusion XL 1.0 Demo
+                 </h1>
+               </div>
+               <p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
+                 Stable Diffusion XL 1.0 is the latest text-to-image model from Stability AI.
+                 <br/>
+                 The source code of this space is at
+                 <a
+                   href="https://github.com/TonyLianLong/stable-diffusion-xl-demo"
+                   style="text-decoration: underline;"
+                   target="_blank"
+                 >TonyLianLong/stable-diffusion-xl-demo</a>.
+               </p>
+             </div>
+         """
      )
      with gr.Group():
+         with gr.Box():
+             with gr.Row(elem_id="prompt-container", equal_height=True, style=dict(mobile_collapse=False)):
+                 with gr.Column():
+                     text = gr.Textbox(
+                         label="Enter your prompt",
+                         show_label=False,
+                         max_lines=1,
+                         placeholder="Enter your prompt",
+                         elem_id="prompt-text-input",
+                     ).style(
+                         border=(True, False, True, True),
+                         rounded=(True, False, False, True),
+                         container=False,
+                     )
+                     negative = gr.Textbox(
+                         label="Enter your negative prompt",
+                         show_label=False,
+                         max_lines=1,
+                         placeholder="Enter a negative prompt",
+                         elem_id="negative-prompt-text-input",
+                     ).style(
+                         border=(True, False, True, True),
+                         rounded=(True, False, False, True),
+                         container=False,
+                     )
+                 btn = gr.Button("Generate image", elem_id="generate-image-btn").style(
+                     rounded=(False, True, True, False),
+                     full_width=False,
+                 )

+     gallery = gr.Gallery(
+         label="Generated images", show_label=False, elem_id="gallery"
+     ).style(grid=[2], height="auto")
+
+     with gr.Group(elem_id="container-advanced-btns"):
+         # advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
+         with gr.Group(elem_id="share-btn-container"):
+             community_icon = gr.HTML(community_icon_html)
+             loading_icon = gr.HTML(loading_icon_html)
+             share_button = gr.Button("Share to community", elem_id="share-btn")
+
+     with gr.Accordion("Advanced settings", open=False):
+         # gr.Markdown("Advanced settings are temporarily unavailable")
+         samples = gr.Slider(label="Images", minimum=1, maximum=max(16 if enable_lcm else 4, default_num_images), value=default_num_images, step=1)
+         if enable_lcm:
+             steps = gr.Slider(label="Steps", minimum=1, maximum=10, value=4, step=1)
+         else:
+             steps = gr.Slider(label="Steps", minimum=1, maximum=250, value=50, step=1)
+
+         if enable_refiner:
+             refiner_strength = gr.Slider(label="Refiner Strength", minimum=0, maximum=1.0, value=0.3, step=0.1)
+         else:
+             refiner_strength = gr.Slider(label="Refiner Strength (refiner not enabled)", minimum=0, maximum=0, value=0, step=0)
+         guidance_scale = gr.Slider(
+             label="Guidance Scale", minimum=0, maximum=50, value=default_guidance_scale, step=0.1
          )
+
+         seed = gr.Slider(
+             label="Seed",
+             minimum=-1,
+             maximum=2147483647,
              step=1,
+             randomize=True,
          )

+     ex = gr.Examples(examples=examples, fn=infer, inputs=[text, negative, guidance_scale], outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False)
+     ex.dataset.headers = [""]
+     negative.submit(infer, inputs=[text, negative, guidance_scale, samples, steps, refiner_strength, seed], outputs=[gallery], postprocess=False)
+     text.submit(infer, inputs=[text, negative, guidance_scale, samples, steps, refiner_strength, seed], outputs=[gallery], postprocess=False)
+     btn.click(infer, inputs=[text, negative, guidance_scale, samples, steps, refiner_strength, seed], outputs=[gallery], postprocess=False)
+
+     # advanced_button.click(
+     #     None,
+     #     [],
+     #     text,
+     #     _js="""
+     #     () => {
+     #         const options = document.querySelector("body > gradio-app").querySelector("#advanced-options");
+     #         options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none";
+     #     }""",
+     # )
+     share_button.click(
+         None,
+         [],
+         [],
+         _js=share_js,
+     )
+     gr.HTML(
+         f"""
+             <div class="footer">
+                 <p>
+                     This space uses the {model_key_base} model{" with the " + lcm_lora_id + " LCM LoRA" if enable_lcm else ""}. Gradio demo by 🤗 Hugging Face and <a style="text-decoration: underline;" href="https://tonylian.com/">Long (Tony) Lian</a>.<br/>
+                 </p>
+             </div>
+         """
+     )
+     with gr.Accordion(label="License", open=False):
+         gr.HTML(
+             """<div class="acknowledgments">
+                 <p><h4>LICENSE</h4>
+                 The SDXL 1.0 model is licensed under the <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md" style="text-decoration: underline;" target="_blank">Stability AI CreativeML Open RAIL++-M</a> license. The license allows users to take advantage of the model in a wide range of settings (including free use and redistribution) as long as they respect the specific use-case restrictions outlined, which correspond to model applications the licensor deems ill-suited for the model or likely to cause harm. For the full list of restrictions please <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md" style="text-decoration: underline;" target="_blank">read the license</a>.</p>
+                 <p><h4>Biases and content acknowledgment</h4>
+                 Despite how impressive turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0" style="text-decoration: underline;" target="_blank">model card</a>.</p>
+             </div>
+             """
+         )
+ block.queue().launch(share=share)
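
Two condensed sketches of the techniques this commit adopts; both are illustrative, assembled from the model ids and defaults in the added code, and not part of the commit itself. First, the LCM-LoRA fast path: the app loads latent-consistency/lcm-lora-sdxl and swaps in LCMScheduler, which is why the UI defaults to a guidance scale of 1 and 4 steps when LCM is enabled.

import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16, use_safetensors=True, variant="fp16",
).to("cuda")
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")  # the LCM LoRA the app uses
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# LCM needs very few steps and little to no classifier-free guidance.
image = pipe("an insect robot preparing a delicious meal",
             num_inference_steps=4, guidance_scale=1.0).images[0]
image.save("lcm_out.jpg")  # hypothetical output path

Second, the base-to-refiner hand-off behind the comment "This skips the decoding and re-encoding for refinement": when the refiner is enabled and intermediate outputs are not requested, infer() asks the base pipeline for latents and passes them straight to the refiner, avoiding a VAE decode/re-encode round-trip. A sketch of that path without LCM (the app disables the refiner by default when LCM is on):

import torch
from diffusers import DiffusionPipeline

base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16, use_safetensors=True, variant="fp16",
).to("cuda")
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16, use_safetensors=True, variant="fp16",
)
refiner.enable_model_cpu_offload()  # mirrors the app's OFFLOAD_REFINER default

prompt = "A small cabin on top of a snowy mountain in the style of Disney, artstation"
# output_type="latent" skips the VAE decode; the refiner consumes the latents directly.
latents = base(prompt, num_inference_steps=50, guidance_scale=9.0,
               output_type="latent").images
image = refiner(prompt, image=latents, num_inference_steps=50,
                strength=0.3).images[0]  # 0.3 matches the UI's default refiner strength
image.save("refined_out.jpg")  # hypothetical output path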