prithivMLmods committed
Commit b25aff8 · verified · 1 Parent(s): 816e0d7

Update app.py

Files changed (1):
  1. app.py +173 -320

app.py CHANGED
@@ -2,165 +2,62 @@ import os
  import random
  import uuid
  import json
- import time
- import asyncio
- import re
- from threading import Thread
-
  import gradio as gr
- import spaces
- import torch
  import numpy as np
  from PIL import Image
- import edge_tts
-
- from transformers import (
-     AutoModelForCausalLM,
-     AutoTokenizer,
-     TextIteratorStreamer,
-     Qwen2VLForConditionalGeneration,
-     AutoProcessor,
- )
- from transformers.image_utils import load_image
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

- DESCRIPTION = """
- # SDXL LoRA DLC 🎃
  """

  css = '''
- h1 {
-     text-align: center;
-     display: block;
- }
-
- #duplicate-button {
-     margin: auto;
-     color: #fff;
-     background: #1565c0;
-     border-radius: 100vh;
  }
  '''

- MAX_MAX_NEW_TOKENS = 2048
- DEFAULT_MAX_NEW_TOKENS = 1024
- MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
-
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
- # -----------------------
- # Progress Bar Helper
- # -----------------------
- def progress_bar_html(label: str) -> str:
-     """
-     Returns an HTML snippet for a thin progress bar with a label.
-     The progress bar is styled as a dark red animated bar.
-     """
-     return f'''
-     <div style="display: flex; align-items: center;">
-         <span style="margin-right: 10px; font-size: 14px;">{label}</span>
-         <div style="width: 110px; height: 5px; background-color: #DDA0DD; border-radius: 2px; overflow: hidden;">
-             <div style="width: 100%; height: 100%; background-color: #FF00FF; animation: loading 1.5s linear infinite;"></div>
-         </div>
-     </div>
-     <style>
-     @keyframes loading {{
-         0% {{ transform: translateX(-100%); }}
-         100% {{ transform: translateX(100%); }}
-     }}
-     </style>
-     '''

- # -----------------------
- # Text Generation Setup
- # -----------------------
- model_id = "prithivMLmods/FastThink-0.5B-Tiny"
- tokenizer = AutoTokenizer.from_pretrained(model_id)
- model = AutoModelForCausalLM.from_pretrained(
-     model_id,
-     device_map="auto",
-     torch_dtype=torch.bfloat16,
- )
- model.eval()

- TTS_VOICES = [
-     "en-US-JennyNeural",  # @tts1
-     "en-US-GuyNeural",    # @tts2
- ]

- # -----------------------
- # Multimodal OCR Setup
- # -----------------------
- MODEL_ID = "prithivMLmods/Qwen2-VL-OCR2-2B-Instruct"
- processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
- model_m = Qwen2VLForConditionalGeneration.from_pretrained(
      MODEL_ID,
-     trust_remote_code=True,
-     torch_dtype=torch.float16
- ).to("cuda").eval()
-
- async def text_to_speech(text: str, voice: str, output_file="output.mp3"):
-     """Convert text to speech using Edge TTS and save as MP3"""
-     communicate = edge_tts.Communicate(text, voice)
-     await communicate.save(output_file)
-     return output_file

- def clean_chat_history(chat_history):
-     """Filter out any chat entries whose "content" is not a string."""
-     cleaned = []
-     for msg in chat_history:
-         if isinstance(msg, dict) and isinstance(msg.get("content"), str):
-             cleaned.append(msg)
-     return cleaned

- # -----------------------
- # Stable Diffusion Image Generation Setup
- # -----------------------

  MAX_SEED = np.iinfo(np.int32).max
- USE_TORCH_COMPILE = False
- ENABLE_CPU_OFFLOAD = False
-
- if torch.cuda.is_available():
-     pipe = StableDiffusionXLPipeline.from_pretrained(
-         "SG161222/RealVisXL_V4.0_Lightning",
-         torch_dtype=torch.float16,
-         use_safetensors=True,
-     )
-     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
-     # LoRA options with one example for each.
-     LORA_OPTIONS = {
-         "Realism": ("prithivMLmods/Canopus-Realism-LoRA", "Canopus-Realism-LoRA.safetensors", "rlms"),
-         "Pixar": ("prithivMLmods/Canopus-Pixar-Art", "Canopus-Pixar-Art.safetensors", "pixar"),
-         "Photoshoot": ("prithivMLmods/Canopus-Photo-Shoot-Mini-LoRA", "Canopus-Photo-Shoot-Mini-LoRA.safetensors", "photo"),
-         "Clothing": ("prithivMLmods/Canopus-Clothing-Adp-LoRA", "Canopus-Dress-Clothing-LoRA.safetensors", "clth"),
-         "Interior": ("prithivMLmods/Canopus-Interior-Architecture-0.1", "Canopus-Interior-Architecture-0.1δ.safetensors", "arch"),
-         "Fashion": ("prithivMLmods/Canopus-Fashion-Product-Dilation", "Canopus-Fashion-Product-Dilation.safetensors", "fashion"),
-         "Minimalistic": ("prithivMLmods/Pegasi-Minimalist-Image-Style", "Pegasi-Minimalist-Image-Style.safetensors", "minimalist"),
-         "Modern": ("prithivMLmods/Canopus-Modern-Clothing-Design", "Canopus-Modern-Clothing-Design.safetensors", "mdrnclth"),
-         "Animaliea": ("prithivMLmods/Canopus-Animaliea-Artism", "Canopus-Animaliea-Artism.safetensors", "Animaliea"),
-         "Wallpaper": ("prithivMLmods/Canopus-Liquid-Wallpaper-Art", "Canopus-Liquid-Wallpaper-Minimalize-LoRA.safetensors", "liquid"),
-         "Cars": ("prithivMLmods/Canes-Cars-Model-LoRA", "Canes-Cars-Model-LoRA.safetensors", "car"),
-         "PencilArt": ("prithivMLmods/Canopus-Pencil-Art-LoRA", "Canopus-Pencil-Art-LoRA.safetensors", "Pencil Art"),
-         "ArtMinimalistic": ("prithivMLmods/Canopus-Art-Medium-LoRA", "Canopus-Art-Medium-LoRA.safetensors", "mdm"),
-     }
-
-     # Load all LoRA weights
-     for model_name, weight_name, adapter_name in LORA_OPTIONS.values():
-         pipe.load_lora_weights(model_name, weight_name=weight_name, adapter_name=adapter_name)
-     pipe.to("cuda")
- else:
-     pipe = StableDiffusionXLPipeline.from_pretrained(
-         "SG161222/RealVisXL_V4.0_Lightning",
-         torch_dtype=torch.float32,
-         use_safetensors=True,
-     ).to(device)

- def save_image(img: Image.Image) -> str:
-     """Save a PIL image with a unique filename and return the path."""
      unique_name = str(uuid.uuid4()) + ".png"
      img.save(unique_name)
      return unique_name
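A note on the block above: the removed setup loads a dozen style LoRAs into one SDXL pipeline and later switches between them by adapter name. A minimal, self-contained sketch of that multi-adapter pattern (the LoRA repo and file names below are illustrative placeholders, not the Space's actual weights):

import torch
from diffusers import StableDiffusionXLPipeline

# Load the base model, attach one named LoRA adapter, and activate it.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16
).to("cuda")
# "some-user/style-lora" / "style.safetensors" are hypothetical names.
pipe.load_lora_weights("some-user/style-lora", weight_name="style.safetensors", adapter_name="style")
pipe.set_adapters("style")  # select the active adapter by name
image = pipe("a prompt", cross_attention_kwargs={"scale": 0.65}).images[0]  # 0.65 scales the LoRA's effect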
@@ -170,200 +67,156 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
          seed = random.randint(0, MAX_SEED)
      return seed

- @spaces.GPU(duration=180, enable_queue=True)
- def generate_image(
      prompt: str,
      negative_prompt: str = "",
-     seed: int = 0,
      width: int = 1024,
      height: int = 1024,
-     guidance_scale: float = 3.0,
-     randomize_seed: bool = True,
-     lora_model: str = "Realism",
      progress=gr.Progress(track_tqdm=True),
  ):
      seed = int(randomize_seed_fn(seed, randomize_seed))
-     effective_negative_prompt = negative_prompt  # Use the provided negative prompt, if any
-     model_name, weight_name, adapter_name = LORA_OPTIONS[lora_model]
-     pipe.set_adapters(adapter_name)
-     outputs = pipe(
-         prompt=prompt,
-         negative_prompt=effective_negative_prompt,
-         width=width,
-         height=height,
-         guidance_scale=guidance_scale,
-         num_inference_steps=28,
-         num_images_per_prompt=1,
-         cross_attention_kwargs={"scale": 0.65},
-         output_type="pil",
-     )
-     images = outputs.images
      image_paths = [save_image(img) for img in images]
      return image_paths, seed

- # -----------------------
- # Main Chat/Generation Function
- # -----------------------
- @spaces.GPU
- def generate(
-     input_dict: dict,
-     chat_history: list[dict],
-     max_new_tokens: int = 1024,
-     temperature: float = 0.6,
-     top_p: float = 0.9,
-     top_k: int = 50,
-     repetition_penalty: float = 1.2,
- ):
-     """
-     Generates chatbot responses with support for multimodal input, TTS, and image generation.
-     Special commands:
-     - "@tts1" or "@tts2": triggers text-to-speech.
-     - "@<lora_command>": triggers image generation using the LoRA pipeline.
-       Available commands (case-insensitive): @realism, @pixar, @photoshoot, @clothing, @interior, @fashion,
-       @minimalistic, @modern, @animaliea, @wallpaper, @cars, @pencilart, @artminimalistic.
-     """
-     text = input_dict["text"]
-     files = input_dict.get("files", [])
-
-     # Check for an image generation command based on LoRA tags.
-     lora_mapping = {key.lower(): key for key in LORA_OPTIONS}
-     for key_lower, key in lora_mapping.items():
-         command_tag = "@" + key_lower
-         if text.strip().lower().startswith(command_tag):
-             prompt_text = text.strip()[len(command_tag):].strip()
-             yield progress_bar_html(f"Processing Image Generation ({key} style)")
-             image_paths, used_seed = generate_image(
-                 prompt=prompt_text,
-                 negative_prompt="",
-                 seed=1,
-                 width=1024,
-                 height=1024,
-                 guidance_scale=3,
-                 randomize_seed=True,
-                 lora_model=key,
              )
-             yield progress_bar_html("Finalizing Image Generation")
-             yield gr.Image(image_paths[0])
-             return
-
-     # Check for a TTS command (@tts1 or @tts2).
-     tts_prefix = "@tts"
-     is_tts = any(text.strip().lower().startswith(f"{tts_prefix}{i}") for i in range(1, 3))
-     voice_index = next((i for i in range(1, 3) if text.strip().lower().startswith(f"{tts_prefix}{i}")), None)
-
-     if is_tts and voice_index:
-         voice = TTS_VOICES[voice_index - 1]
-         text = text.replace(f"{tts_prefix}{voice_index}", "").strip()
-         conversation = [{"role": "user", "content": text}]
-     else:
-         voice = None
-         text = text.replace(tts_prefix, "").strip()
-         conversation = clean_chat_history(chat_history)
-         conversation.append({"role": "user", "content": text})
-
-     if files:
-         if len(files) > 1:
-             images = [load_image(image) for image in files]
-         elif len(files) == 1:
-             images = [load_image(files[0])]
-         else:
-             images = []
-         messages = [{
-             "role": "user",
-             "content": [
-                 *[{"type": "image", "image": image} for image in images],
-                 {"type": "text", "text": text},
-             ]
-         }]
-         prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-         inputs = processor(text=[prompt], images=images, return_tensors="pt", padding=True).to("cuda")
-         streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
-         generation_kwargs = {**inputs, "streamer": streamer, "max_new_tokens": max_new_tokens}
-         thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
-         thread.start()
-
-         buffer = ""
-         yield progress_bar_html("Processing with Qwen2-VL OCR")
-         for new_text in streamer:
-             buffer += new_text
-             buffer = buffer.replace("<|im_end|>", "")
-             time.sleep(0.01)
-             yield buffer
-     else:
-         input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
-         if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-             input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
-             gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
-         input_ids = input_ids.to(model.device)
-         streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
-         generation_kwargs = {
-             "input_ids": input_ids,
-             "streamer": streamer,
-             "max_new_tokens": max_new_tokens,
-             "do_sample": True,
-             "top_p": top_p,
-             "top_k": top_k,
-             "temperature": temperature,
-             "num_beams": 1,
-             "repetition_penalty": repetition_penalty,
-         }
-         t = Thread(target=model.generate, kwargs=generation_kwargs)
-         t.start()
-
-         outputs = []
-         for new_text in streamer:
-             outputs.append(new_text)
-             yield "".join(outputs)

-         final_response = "".join(outputs)
-         yield final_response

-         if is_tts and voice:
-             output_file = asyncio.run(text_to_speech(final_response, voice))
-             yield gr.Audio(output_file, autoplay=True)

- # -----------------------
- # Gradio Chat Interface
- # -----------------------
- demo = gr.ChatInterface(
-     fn=generate,
-     additional_inputs=[
-         gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS),
-         gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6),
-         gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
-         gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50),
-         gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2),
-     ],
-     examples=[
-         ['@realism Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic'],
-         ["@pixar A young man with light brown wavy hair and light brown eyes sitting in an armchair and looking directly at the camera, pixar style, disney pixar, office background, ultra detailed, 1 man"],
-         ["@realism A futuristic cityscape with neon lights"],
-         ["@photoshoot A portrait of a person with dramatic lighting"],
-         [{"text": "summarize the letter", "files": ["examples/1.png"]}],
-         ["Python Program for Array Rotation"],
-         ["@tts1 Who is Nikola Tesla, and why did he die?"],
-         ["@clothing Fashionable streetwear in an urban environment"],
-         ["@interior A modern living room interior with minimalist design"],
-         ["@fashion A runway model in haute couture"],
-         ["@minimalistic A simple and elegant design of a serene landscape"],
-         ["@modern A contemporary art piece with abstract geometric shapes"],
-         ["@animaliea A cute animal portrait with vibrant colors"],
-         ["@wallpaper A scenic mountain range perfect for a desktop wallpaper"],
-         ["@cars A sleek sports car cruising on a city street"],
-         ["@pencilart A detailed pencil sketch of a historic building"],
-         ["@artminimalistic An artistic minimalist composition with subtle tones"],
-         ["@tts2 What causes rainbows to form?"],
-     ],
-     cache_examples=False,
-     type="messages",
-     description=DESCRIPTION,
-     css=css,
-     fill_height=True,
-     textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple", placeholder="default [text, vision]; scroll down to the examples to explore more art styles"),
-     stop_btn="Stop Generation",
-     theme=gr.themes.Soft(),
-     multimodal=True,
- )

  if __name__ == "__main__":
-     demo.queue(max_size=30).launch(ssr_mode=False, share=True)
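For reference, the removed generate() above is essentially a prefix router: "@<style>" tags (e.g. @realism, @pixar) divert to the LoRA image path, "@tts1"/"@tts2" pick an Edge TTS voice, and anything else falls through to plain chat generation. A stripped-down sketch of that dispatch shape (the commands mapping and handlers are illustrative stand-ins, not the Space's real code):

def route(text: str, commands: dict):
    # commands maps a lowercase tag (without "@") to a handler callable.
    lowered = text.strip().lower()
    for tag, handler in commands.items():
        if lowered.startswith("@" + tag):
            # Strip the tag and hand the remainder over as the prompt.
            return handler(text.strip()[len(tag) + 1:].strip())
    return None  # no tag matched; the caller falls back to plain chat

The new app.py, shown next, drops all of this routing and keeps only a single text-to-image path.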
 
  import random
  import uuid
  import json
  import gradio as gr
  import numpy as np
  from PIL import Image
+ import spaces
+ import torch
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

+ DESCRIPTIONx = """## STABLE HAMSTER 🐹
+
  """

  css = '''
+ .gradio-container{max-width: 560px !important}
+ h1{text-align:center}
+ footer {
+     visibility: hidden
  }
  '''

+ examples = [
+     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
+     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
+     "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a t-shirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
+     "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
+     "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16",
+ ]

+ MODEL_ID = os.getenv("MODEL_VAL_PATH")
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))  # allow generating multiple images per pipeline call

+ # Load the model once at startup, outside the request function.
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ pipe = StableDiffusionXLPipeline.from_pretrained(
      MODEL_ID,
+     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+     use_safetensors=True,
+     add_watermarker=False,
+ ).to(device)
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

+ # Optional speedup: compile the UNet with torch.compile.
+ if USE_TORCH_COMPILE:
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

+ # Optionally offload model components to CPU RAM to reduce VRAM usage.
+ if ENABLE_CPU_OFFLOAD:
+     pipe.enable_model_cpu_offload()

  MAX_SEED = np.iinfo(np.int32).max

+ def save_image(img):
      unique_name = str(uuid.uuid4()) + ".png"
      img.save(unique_name)
      return unique_name
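One caveat in the setup above: MODEL_ID is read from the MODEL_VAL_PATH environment variable and is None when that variable is unset, which makes from_pretrained fail at startup. A hedged guard sketch (the fallback repo is an assumption, chosen only because it is the checkpoint the previous version of this file used, not something the Space configures):

import os

# Fall back to a public SDXL checkpoint when MODEL_VAL_PATH is not set.
# The fallback name is a placeholder assumption, not the Space's setting.
MODEL_ID = os.getenv("MODEL_VAL_PATH") or "SG161222/RealVisXL_V4.0_Lightning"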
 
          seed = random.randint(0, MAX_SEED)
      return seed

+ @spaces.GPU(duration=60, enable_queue=True)
+ def generate(
      prompt: str,
      negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     seed: int = 1,
      width: int = 1024,
      height: int = 1024,
+     guidance_scale: float = 3,
+     num_inference_steps: int = 25,
+     randomize_seed: bool = False,
+     num_images: int = 4,  # number of images to generate
+     use_resolution_binning: bool = True,  # not wired to the UI; kept after num_images so the gr.on inputs below bind in order
      progress=gr.Progress(track_tqdm=True),
  ):
      seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator(device=device).manual_seed(seed)
+
+     # Shared pipeline options for every batch.
+     options = {
+         "prompt": [prompt] * num_images,
+         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
+         "width": width,
+         "height": height,
+         "guidance_scale": guidance_scale,
+         "num_inference_steps": num_inference_steps,
+         "generator": generator,
+         "output_type": "pil",
+     }
+
+     if use_resolution_binning:
+         # PixArt-style option; plain StableDiffusionXLPipeline does not
+         # implement it, so this flag is likely a no-op for this pipeline.
+         options["use_resolution_binning"] = True
+
+     # Generate the requested images in batches of at most BATCH_SIZE.
+     images = []
+     for i in range(0, num_images, BATCH_SIZE):
+         batch_options = options.copy()
+         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
+         if batch_options["negative_prompt"] is not None:
+             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
+         images.extend(pipe(**batch_options).images)
+
      image_paths = [save_image(img) for img in images]
      return image_paths, seed

+ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
+     gr.Markdown(DESCRIPTIONx)
+     with gr.Group():
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+             run_button = gr.Button("Run", scale=0)
+         result = gr.Gallery(label="Result", columns=2, show_label=False)
+     with gr.Accordion("Advanced options", open=False, visible=True):
+         num_images = gr.Slider(
+             label="Number of Images",
+             minimum=1,
+             maximum=4,
+             step=1,
+             value=4,
+         )
+         with gr.Row():
+             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+             negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=5,
+                 lines=4,
+                 placeholder="Enter a negative prompt",
+                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                 visible=True,
+             )
+         seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=MAX_SEED,
+             step=1,
+             value=0,
+         )
+         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Row(visible=True):
+             width = gr.Slider(
+                 label="Width",
+                 minimum=512,
+                 maximum=MAX_IMAGE_SIZE,
+                 step=64,
+                 value=1024,
+             )
+             height = gr.Slider(
+                 label="Height",
+                 minimum=512,
+                 maximum=MAX_IMAGE_SIZE,
+                 step=64,
+                 value=1024,
+             )
+         with gr.Row():
+             guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.1,
+                 maximum=6,
+                 step=0.1,
+                 value=3.0,
+             )
+             num_inference_steps = gr.Slider(
+                 label="Number of inference steps",
+                 minimum=1,
+                 maximum=25,
+                 step=1,
+                 value=23,
              )

+     gr.Examples(
+         examples=examples,
+         inputs=prompt,
+         cache_examples=False,
+     )

+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         api_name=False,
+     )

+     gr.on(
+         triggers=[
+             prompt.submit,
+             negative_prompt.submit,
+             run_button.click,
+         ],
+         fn=generate,
+         inputs=[
+             prompt,
+             negative_prompt,
+             use_negative_prompt,
+             seed,
+             width,
+             height,
+             guidance_scale,
+             num_inference_steps,
+             randomize_seed,
+             num_images,
+         ],
+         outputs=[result, seed],
+         api_name="run",
+     )

  if __name__ == "__main__":
+     demo.queue(max_size=40).launch(ssr_mode=False)
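The core of the new generate() is the chunked loop: num_images copies of the prompt are split into pipeline calls of at most BATCH_SIZE prompts each. A minimal sketch of that pattern on its own, outside the Gradio wiring (function and parameter names here are illustrative):

def generate_in_batches(pipe, prompt, num_images, batch_size, **call_kwargs):
    # Issue ceil(num_images / batch_size) pipeline calls, each with at most
    # batch_size copies of the prompt, and collect every returned image.
    images = []
    for i in range(0, num_images, batch_size):
        n = min(batch_size, num_images - i)
        images.extend(pipe(prompt=[prompt] * n, **call_kwargs).images)
    return images

With the default BATCH_SIZE=1 this degenerates to one call per image, which keeps peak VRAM flat at the cost of extra latency; raising BATCH_SIZE trades memory for throughput.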