Update app.py
Browse files
app.py
CHANGED
@@ -62,7 +62,7 @@ def is_civitai_url(url):
|
|
62 |
|
63 |
@spaces.GPU
|
64 |
def generate_image(prompt, negative_prompt, lora_url, num_inference_steps=30, guidance_scale=7.0,
|
65 |
-
model="Real6.0", num_images=1, width=512, height=512):
|
66 |
|
67 |
if model == "Real5.0":
|
68 |
model_id = "SG161222/Realistic_Vision_V5.0_noVAE"
|
@@ -147,6 +147,11 @@ def generate_image(prompt, negative_prompt, lora_url, num_inference_steps=30, gu
|
|
147 |
use_karras_sigmas=True
|
148 |
)
|
149 |
|
|
|
|
|
|
|
|
|
|
|
150 |
text_inputs = tokenizer(
|
151 |
prompt,
|
152 |
padding="max_length",
|
@@ -175,10 +180,11 @@ def generate_image(prompt, negative_prompt, lora_url, num_inference_steps=30, gu
|
|
175 |
guidance_scale=guidance_scale,
|
176 |
width=width,
|
177 |
height=height,
|
178 |
-
num_images_per_prompt=num_images
|
|
|
179 |
)
|
180 |
|
181 |
-
return result.images
|
182 |
|
183 |
def clean_lora_cache():
|
184 |
"""Clean the LoRA cache directory"""
|
@@ -259,6 +265,7 @@ with gr.Blocks() as demo:
|
|
259 |
step=0.5,
|
260 |
label="Guidance Scale"
|
261 |
)
|
|
|
262 |
|
263 |
with gr.Column():
|
264 |
# Output component
|
@@ -269,13 +276,14 @@ with gr.Blocks() as demo:
|
|
269 |
columns=2,
|
270 |
rows=2
|
271 |
)
|
|
|
272 |
|
273 |
# Connect the interface to the generation function
|
274 |
generate_button.click(
|
275 |
fn=generate_image,
|
276 |
inputs=[prompt, negative_prompt, lora_input, steps_slider, guidance_slider,
|
277 |
-
model, num_images, width, height],
|
278 |
-
outputs=gallery
|
279 |
)
|
280 |
|
281 |
# Connect clear cache button
|
|
|
62 |
|
63 |
@spaces.GPU
|
64 |
def generate_image(prompt, negative_prompt, lora_url, num_inference_steps=30, guidance_scale=7.0,
|
65 |
+
model="Real6.0", num_images=1, width=512, height=512, seed=None):
|
66 |
|
67 |
if model == "Real5.0":
|
68 |
model_id = "SG161222/Realistic_Vision_V5.0_noVAE"
|
|
|
147 |
use_karras_sigmas=True
|
148 |
)
|
149 |
|
150 |
+
if seed is None:
|
151 |
+
seed = random.randint(0, 2**32 - 1)
|
152 |
+
|
153 |
+
generator = torch.manual_seed(seed)
|
154 |
+
|
155 |
text_inputs = tokenizer(
|
156 |
prompt,
|
157 |
padding="max_length",
|
|
|
180 |
guidance_scale=guidance_scale,
|
181 |
width=width,
|
182 |
height=height,
|
183 |
+
num_images_per_prompt=num_images,
|
184 |
+
generator=generator
|
185 |
)
|
186 |
|
187 |
+
return result.images, seed
|
188 |
|
189 |
def clean_lora_cache():
|
190 |
"""Clean the LoRA cache directory"""
|
|
|
265 |
step=0.5,
|
266 |
label="Guidance Scale"
|
267 |
)
|
268 |
+
seed_input = gr.Number(value=None, label="Seed (optional, leave empty for random)")
|
269 |
|
270 |
with gr.Column():
|
271 |
# Output component
|
|
|
276 |
columns=2,
|
277 |
rows=2
|
278 |
)
|
279 |
+
seed_display = gr.Textbox(label="Seed Used", interactive=False)
|
280 |
|
281 |
# Connect the interface to the generation function
|
282 |
generate_button.click(
|
283 |
fn=generate_image,
|
284 |
inputs=[prompt, negative_prompt, lora_input, steps_slider, guidance_slider,
|
285 |
+
model, num_images, width, height, seed_input],
|
286 |
+
outputs=[gallery, seed_display]
|
287 |
)
|
288 |
|
289 |
# Connect clear cache button
|