Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,368 +1,285 @@
[old app.py lines 1-285 were removed in this commit; their text is not preserved in this view beyond stray fragments (import statements, a "pipe." reference)]
-        yield progress_bar_html("Processing with Qwen2VL Ocr")
-        for new_text in streamer:
-            buffer += new_text
-            buffer = buffer.replace("<|im_end|>", "")
-            time.sleep(0.01)
-            yield buffer
-    else:
-        input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
-        if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-            input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
-            gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
-        input_ids = input_ids.to(model.device)
-        streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
-        generation_kwargs = {
-            "input_ids": input_ids,
-            "streamer": streamer,
-            "max_new_tokens": max_new_tokens,
-            "do_sample": True,
-            "top_p": top_p,
-            "top_k": top_k,
-            "temperature": temperature,
-            "num_beams": 1,
-            "repetition_penalty": repetition_penalty,
-        }
-        t = Thread(target=model.generate, kwargs=generation_kwargs)
-        t.start()
-
-        outputs = []
-        for new_text in streamer:
-            outputs.append(new_text)
-            yield "".join(outputs)
-
-        final_response = "".join(outputs)
-        yield final_response
-
-        if is_tts and voice:
-            output_file = asyncio.run(text_to_speech(final_response, voice))
-            yield gr.Audio(output_file, autoplay=True)
-
-# -----------------------
-# Gradio Chat Interface
-# -----------------------
-demo = gr.ChatInterface(
-    fn=generate,
-    additional_inputs=[
-        gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS),
-        gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6),
-        gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
-        gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50),
-        gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2),
-    ],
-    examples=[
-        ['@realism Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic'],
-        ["@pixar A young man with light brown wavy hair and light brown eyes sitting in an armchair and looking directly at the camera, pixar style, disney pixar, office background, ultra detailed, 1 man"],
-        ["@realism A futuristic cityscape with neon lights"],
-        ["@photoshoot A portrait of a person with dramatic lighting"],
-        [{"text": "summarize the letter", "files": ["examples/1.png"]}],
-        ["Python Program for Array Rotation"],
-        ["@tts1 Who is Nikola Tesla, and why did he die?"],
-        ["@clothing Fashionable streetwear in an urban environment"],
-        ["@interior A modern living room interior with minimalist design"],
-        ["@fashion A runway model in haute couture"],
-        ["@minimalistic A simple and elegant design of a serene landscape"],
-        ["@modern A contemporary art piece with abstract geometric shapes"],
-        ["@animaliea A cute animal portrait with vibrant colors"],
-        ["@wallpaper A scenic mountain range perfect for a desktop wallpaper"],
-        ["@cars A sleek sports car cruising on a city street"],
-        ["@pencilart A detailed pencil sketch of a historic building"],
-        ["@artminimalistic An artistic minimalist composition with subtle tones"],
-        ["@tts2 What causes rainbows to form?"],
-    ],
-    cache_examples=False,
-    type="messages",
-    description=DESCRIPTION,
-    css=css,
-    fill_height=True,
-    textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple", placeholder="default [text, vision] , scroll down examples to explore more art styles"),
-    stop_btn="Stop Generation",
-    multimodal=True,
-)
-
-if __name__ == "__main__":
-    demo.queue(max_size=20).launch(share=True)
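For context on what the old chat path was doing: it ran model.generate on a worker thread and read partial text from a transformers TextIteratorStreamer, yielding the growing buffer back to Gradio. A minimal, self-contained sketch of that pattern is below; the gpt2 checkpoint and the plain-text prompt are stand-in assumptions, not what the Space actually used.

```python
# Minimal sketch of the streaming pattern used by the removed code path.
# Assumption: "gpt2" is only a small stand-in checkpoint; the original Space
# used its own model, tokenizer, and chat template.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("The quick brown fox", return_tensors="pt").input_ids

# skip_prompt=True keeps the echoed prompt out of the streamed text.
streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = {
    "input_ids": input_ids,
    "streamer": streamer,
    "max_new_tokens": 32,
    "do_sample": False,
}

# generate() blocks, so it runs on a worker thread while the main thread
# consumes partial text from the streamer as it arrives.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

partial = ""
for new_text in streamer:
    partial += new_text
    print(partial)
thread.join()
```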
+#!/usr/bin/env python
+#patch 1.0()
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# ...
+import os
+import random
+import uuid
+from typing import Tuple
+import gradio as gr
+import numpy as np
+from PIL import Image
+import spaces
+import torch
+from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+
+DESCRIPTIONz = """## SDXL-LoRA-DLC ⚡
+"""
+
+def save_image(img):
+    unique_name = str(uuid.uuid4()) + ".png"
+    img.save(unique_name)
+    return unique_name
+
+def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    return seed
+
+MAX_SEED = np.iinfo(np.int32).max
+
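randomize_seed_fn and MAX_SEED above only pick the seed value; for a seed to matter during sampling, it is normally fed to a torch.Generator that is passed to the pipeline call. A small illustration of that convention follows (the generator wiring is standard diffusers usage shown for clarity, not code from this commit):

```python
import random

import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max

# Mirror of randomize_seed_fn above: keep the user's seed unless they asked
# for a random one.
seed = 42
randomize_seed = False
if randomize_seed:
    seed = random.randint(0, MAX_SEED)

# A torch.Generator seeded this way is what diffusers pipelines accept
# (e.g. pipe(prompt, generator=generator)) to make sampling reproducible.
generator = torch.Generator(device="cpu").manual_seed(seed)
print(seed, generator.initial_seed())
```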
+if not torch.cuda.is_available():
+    DESCRIPTIONz += "\n<p>⚠️ Running on CPU. This demo may not work on CPU; if it runs for an extended time or you encounter errors, try running it on a GPU by duplicating the Space and using @spaces.GPU() with import spaces.📍</p>"
+
+USE_TORCH_COMPILE = 0
+ENABLE_CPU_OFFLOAD = 0
+
+if torch.cuda.is_available():
+    pipe = StableDiffusionXLPipeline.from_pretrained(
+        "SG161222/RealVisXL_V4.0_Lightning",
+        torch_dtype=torch.float16,
+        use_safetensors=True,
+    )
+    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+    LORA_OPTIONS = {
+        "Realism (face/character)👦🏻": ("prithivMLmods/Canopus-Realism-LoRA", "Canopus-Realism-LoRA.safetensors", "rlms"),
+        "Pixar (art/toons)🙀": ("prithivMLmods/Canopus-Pixar-Art", "Canopus-Pixar-Art.safetensors", "pixar"),
+        "Photoshoot (camera/film)📸": ("prithivMLmods/Canopus-Photo-Shoot-Mini-LoRA", "Canopus-Photo-Shoot-Mini-LoRA.safetensors", "photo"),
+        "Clothing (hoodies/pant/shirts)👔": ("prithivMLmods/Canopus-Clothing-Adp-LoRA", "Canopus-Dress-Clothing-LoRA.safetensors", "clth"),
+        "Interior Architecture (house/hotel)🏠": ("prithivMLmods/Canopus-Interior-Architecture-0.1", "Canopus-Interior-Architecture-0.1δ.safetensors", "arch"),
+        "Fashion Product (wearing/usable)👜": ("prithivMLmods/Canopus-Fashion-Product-Dilation", "Canopus-Fashion-Product-Dilation.safetensors", "fashion"),
+        "Minimalistic Image (minimal/detailed)🏞️": ("prithivMLmods/Pegasi-Minimalist-Image-Style", "Pegasi-Minimalist-Image-Style.safetensors", "minimalist"),
+        "Modern Clothing (trend/new)👕": ("prithivMLmods/Canopus-Modern-Clothing-Design", "Canopus-Modern-Clothing-Design.safetensors", "mdrnclth"),
+        "Animaliea (farm/wild)🫎": ("prithivMLmods/Canopus-Animaliea-Artism", "Canopus-Animaliea-Artism.safetensors", "Animaliea"),
+        "Liquid Wallpaper (minimal/illustration)🖼️": ("prithivMLmods/Canopus-Liquid-Wallpaper-Art", "Canopus-Liquid-Wallpaper-Minimalize-LoRA.safetensors", "liquid"),
+        "Canes Cars (realistic/futurecars)🚘": ("prithivMLmods/Canes-Cars-Model-LoRA", "Canes-Cars-Model-LoRA.safetensors", "car"),
+        "Pencil Art (characteristic/creative)✏️": ("prithivMLmods/Canopus-Pencil-Art-LoRA", "Canopus-Pencil-Art-LoRA.safetensors", "Pencil Art"),
+        "Art Minimalistic (paint/semireal)🎨": ("prithivMLmods/Canopus-Art-Medium-LoRA", "Canopus-Art-Medium-LoRA.safetensors", "mdm"),
+    }
+
+    for model_name, weight_name, adapter_name in LORA_OPTIONS.values():
+        pipe.load_lora_weights(model_name, weight_name=weight_name, adapter_name=adapter_name)
+    pipe.to("cuda")
+
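The block above loads every LoRA once under its own adapter_name so that a request only has to call set_adapters to switch styles. A trimmed-down sketch of the same flow for a single adapter, using the same base checkpoint and the Canopus-Realism LoRA from LORA_OPTIONS (it needs a CUDA GPU and sizable downloads, and the trigger word in the prompt is an assumption):

```python
# Minimal sketch of the load-once / switch-per-request adapter flow used above.
# Assumes a CUDA GPU; the repos are the ones listed in LORA_OPTIONS.
import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0_Lightning",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Register one LoRA under a named adapter; more adapters can be added the same way.
pipe.load_lora_weights(
    "prithivMLmods/Canopus-Realism-LoRA",
    weight_name="Canopus-Realism-LoRA.safetensors",
    adapter_name="rlms",
)
pipe.to("cuda")

# Activate the adapter for this request, then generate as usual.
pipe.set_adapters("rlms")
image = pipe(
    "rlms portrait photo of an astronaut, ultra detailed",  # trigger word is an assumption
    num_inference_steps=20,
    guidance_scale=3.0,
    cross_attention_kwargs={"scale": 0.65},
).images[0]
image.save("realism_sample.png")
```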
+style_list = [
+    {
+        "name": "3840 x 2160",
+        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "2560 x 1440",
+        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "HD+",
+        "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
+        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
+    },
+    {
+        "name": "Style Zero",
+        "prompt": "{prompt}",
+        "negative_prompt": "",
+    },
+]
+
+styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
+
+DEFAULT_STYLE_NAME = "3840 x 2160"
+STYLE_NAMES = list(styles.keys())
+
+def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+    if style_name in styles:
+        p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+    else:
+        p, n = styles[DEFAULT_STYLE_NAME]
+
+    if not negative:
+        negative = ""
+    return p.replace("{prompt}", positive), n + negative
+
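For a concrete sense of what apply_style does, here is a short usage illustration; it assumes the style_list, styles, DEFAULT_STYLE_NAME, and apply_style definitions above are in scope:

```python
# Usage illustration for apply_style (assumes the definitions above are in scope).
p, n = apply_style("3840 x 2160", "a lighthouse at dawn", "text, watermark")
print(p)
# hyper-realistic 8K image of a lighthouse at dawn. ultra-detailed, lifelike,
# high-resolution, sharp, vibrant colors, photorealistic
print(n)
# cartoonish, low resolution, blurry, simplistic, abstract, deformed, uglytext, watermark
# (the style's negative text and the user's negative prompt are concatenated directly)

# An unknown style name falls back to DEFAULT_STYLE_NAME ("3840 x 2160").
p, n = apply_style("no-such-style", "a lighthouse at dawn")
```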
+@spaces.GPU(duration=180, enable_queue=True)
+def generate(
+    prompt: str,
+    negative_prompt: str = "",
+    use_negative_prompt: bool = False,
+    seed: int = 0,
+    width: int = 1024,
+    height: int = 1024,
+    guidance_scale: float = 3,
+    randomize_seed: bool = False,
+    style_name: str = DEFAULT_STYLE_NAME,
+    lora_model: str = "Realism (face/character)👦🏻",
+    progress=gr.Progress(track_tqdm=True),
+):
+    seed = int(randomize_seed_fn(seed, randomize_seed))
+
+    positive_prompt, effective_negative_prompt = apply_style(style_name, prompt, negative_prompt)
+
+    if not use_negative_prompt:
+        effective_negative_prompt = "" # type: ignore
+
+    model_name, weight_name, adapter_name = LORA_OPTIONS[lora_model]
+    pipe.set_adapters(adapter_name)
+
+    images = pipe(
+        prompt=positive_prompt,
+        negative_prompt=effective_negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=20,
+        num_images_per_prompt=1,
+        cross_attention_kwargs={"scale": 0.65},
+        output_type="pil",
+    ).images
+    image_paths = [save_image(img) for img in images]
+    return image_paths, seed
+
+examples = [
+    "Realism: Man in the style of dark beige and brown, uhd image, youthful protagonists, nonrepresentational",
+    "Pixar: A young man with light brown wavy hair and light brown eyes sitting in an armchair and looking directly at the camera, pixar style, disney pixar, office background, ultra detailed, 1 man",
+    "Hoodie: Front view, capture a urban style, Superman Hoodie, technical materials, fabric small point label on text Blue theory, the design is minimal, with a raised collar, fabric is a Light yellow, low angle to capture the Hoodies form and detailing, f/5.6 to focus on the hoodies craftsmanship, solid grey background, studio light setting, with batman logo in the chest region of the t-shirt",
+]
+
+css = '''
+.gradio-container{max-width: 545px !important}
+h1{text-align:center}
+footer {
+    visibility: hidden
+}
+'''
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(DESCRIPTIONz)
+    with gr.Group():
+        with gr.Row():
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt with resp. tag!",
+                container=False,
+            )
+            run_button = gr.Button("Run", scale=0)
+        result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
+
+    with gr.Accordion("Advanced options", open=False, visible=False):
+        use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+        negative_prompt = gr.Text(
+            label="Negative prompt",
+            lines=4,
+            max_lines=6,
+            value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+            placeholder="Enter a negative prompt",
+            visible=True,
+        )
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=MAX_SEED,
+            step=1,
+            value=0,
+            visible=True
+        )
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+        with gr.Row(visible=True):
+            width = gr.Slider(
+                label="Width",
+                minimum=512,
+                maximum=2048,
+                step=8,
+                value=1024,
+            )
+            height = gr.Slider(
+                label="Height",
+                minimum=512,
+                maximum=2048,
+                step=8,
+                value=1024,
+            )
+
+        with gr.Row():
+            guidance_scale = gr.Slider(
+                label="Guidance Scale",
+                minimum=0.1,
+                maximum=20.0,
+                step=0.1,
+                value=3.0,
+            )
+
+    style_selection = gr.Radio(
+        show_label=True,
+        container=True,
+        interactive=True,
+        choices=STYLE_NAMES,
+        value=DEFAULT_STYLE_NAME,
+        label="Quality Style",
+    )
+
+    with gr.Row(visible=True):
+        model_choice = gr.Dropdown(
+            label="LoRA Selection",
+            choices=list(LORA_OPTIONS.keys()),
+            value="Realism (face/character)👦🏻"
+        )
+
+    gr.Examples(
+        examples=examples,
+        inputs=prompt,
+        outputs=[result, seed],
+        fn=generate,
+        cache_examples=False,
+    )
+
+    use_negative_prompt.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_negative_prompt,
+        outputs=negative_prompt,
+        api_name=False,
+    )
+
+    gr.on(
+        triggers=[
+            prompt.submit,
+            negative_prompt.submit,
+            run_button.click,
+        ],
+        fn=generate,
+        inputs=[
+            prompt,
+            negative_prompt,
+            use_negative_prompt,
+            seed,
+            width,
+            height,
+            guidance_scale,
+            randomize_seed,
+            style_selection,
+            model_choice,
+        ],
+        outputs=[result, seed],
+        api_name="run",
+    )
+
+    gr.Markdown("⚡ Models used in the playground: [[Lightning]](https://huggingface.co/SG161222/RealVisXL_V4.0_Lightning) & LoRAs from [[LoRA]](https://huggingface.co/collections/prithivMLmods/dev-models-667803a6d5ac75b59110e527) for image generation. Each LoRA in this Space needs its appropriate trigger words to give good results. The models are still in the training phase; this is not the final version and may contain artifacts or perform poorly in some cases.")
+    gr.Markdown("⚡ This is a demo Space for generating images with Stable Diffusion using quality styles and different LoRA models. Try the sample prompts to generate higher-quality images, or browse more here: <a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.")
+    gr.Markdown("⚡ Make sure your prompts include the required trigger words and are well detailed. This Space is intended for educational purposes and personal learning only.")
+    gr.Markdown("⚠️ Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
+
+if __name__ == "__main__":
+    demo.queue(max_size=30).launch()
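Since the main event handler is registered with api_name="run", the Space can also be called programmatically. A hedged sketch with gradio_client follows; the Space ID is a placeholder assumption, and the positional arguments mirror the inputs list wired into gr.on above.

```python
# Sketch of calling the "run" endpoint exposed by gr.on(..., api_name="run").
# The Space ID below is a placeholder assumption; replace it with the real one.
from gradio_client import Client

client = Client("your-username/your-sdxl-lora-space")
result = client.predict(
    "A futuristic cityscape with neon lights",  # prompt
    "",                                         # negative_prompt
    False,                                      # use_negative_prompt
    0,                                          # seed
    1024,                                       # width
    1024,                                       # height
    3.0,                                        # guidance_scale
    True,                                       # randomize_seed
    "3840 x 2160",                              # style_selection
    "Realism (face/character)👦🏻",              # model_choice (LoRA)
    api_name="/run",
)
gallery, seed = result
print(seed, gallery)
```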