Rename quad app.py to app.py

quad app.py → app.py (RENAMED, +33 -19)
@@ -293,24 +293,30 @@ def remove_custom_lora(selected_indices, current_loras, gallery):
         lora_image_2
     )
 
-def …
-    print("Generating …
+def generate_images(prompt_mash, steps, cfg_scale, width, height, progress):
+    print("Generating multiple images...")
     pipe.to("cuda")
-… (15 deleted lines whose text is not preserved in this view)
+    images = []
+
+    for _ in range(4): # Generate 4 images
+        seed = random.randint(0, MAX_SEED)
+        generator = torch.Generator(device="cuda").manual_seed(seed)
+        with calculateDuration("Generating image"):
+            for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+                prompt=prompt_mash,
+                num_inference_steps=steps,
+                guidance_scale=cfg_scale,
+                width=width,
+                height=height,
+                generator=generator,
+                joint_attention_kwargs={"scale": 1.0},
+                output_type="pil",
+                good_vae=good_vae,
+            ):
+                images.append((img, seed)) # Store image and its seed
+                break # Only take the first generated image
+
+    return images
 
 #def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
 #    pipe_i2i.to("cuda")
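Review note on the hunk above: in comparable FLUX LoRA Spaces, helpers like flux_pipe_call_that_returns_an_iterable_of_images yield one decoded preview per denoising step and yield the good_vae-decoded result last. If that holds for this app, the `break` on the first yield stores a step-one preview rather than a finished image. A minimal sketch of the loop that keeps the final yield instead — the yield order is an assumption; every other name comes from the hunk:

    images = []
    for _ in range(4):  # four independent seeds
        seed = random.randint(0, MAX_SEED)
        generator = torch.Generator(device="cuda").manual_seed(seed)
        final_img = None
        with calculateDuration("Generating image"):
            for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
                prompt=prompt_mash,
                num_inference_steps=steps,
                guidance_scale=cfg_scale,
                width=width,
                height=height,
                generator=generator,
                joint_attention_kwargs={"scale": 1.0},
                output_type="pil",
                good_vae=good_vae,
            ):
                final_img = img  # keep overwriting; the last yield is the finished image
        images.append((final_img, seed))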
@@ -331,7 +337,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
         return img
 
 @spaces.GPU(duration=75)
-def …
+def run_lora_multi(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     if not selected_indices:
         raise gr.Error("You must select at least one LoRA before proceeding.")
 
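Note on the rename above: the hunk headers below still show the deleted function's context as def run_lora(...), and the event wiring in the last hunk keeps fn=run_lora. If this commit renamed run_lora to run_lora_multi without leaving a definition of the old name, building the UI would raise a NameError. A one-line sketch that keeps the old name resolvable, assuming no other run_lora remains in app.py:

    # Assumption: run_lora was renamed, not duplicated; rebind the old name
    # so fn=run_lora in the Gradio event wiring still resolves.
    run_lora = run_lora_multi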
@@ -349,6 +355,7 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
             appends.append(trigger_word)
     prompt_mash = " ".join(prepends + [prompt] + appends)
     print("Prompt Mash: ", prompt_mash)
+
     # Unload previous LoRA weights
     with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
@@ -377,6 +384,8 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     # else: pipe = pipe_to_use
     print("Loaded LoRAs:", lora_names)
     print("Adapter weights:", lora_weights)
+    print("cfg_scale:", cfg_scale)
+    print("steps:", steps)
     # if image_input is not None:
     #     pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
     # else:
@@ -393,6 +402,7 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     #     yield final_image, seed, gr.update(visible=False)
     # else:
     image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
+    return [(img, seed) for img, seed in images]
     # Consume the generator to get the final image
     # final_image = None
     step_counter = 0
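Review note on the added return: nothing visible in run_lora_multi assigns `images`, so this line would raise a NameError at runtime, and `image_generator` (the single-image path) is created but never consumed. If the four-image helper added in the first hunk is the intended source, the tail of the function was presumably meant to read as below — the call is an assumption, and the comprehension over the (img, seed) pairs is a no-op, so the list can be returned as-is:

    # Assumption: generate_images (added in the first hunk) is the intended source.
    images = generate_images(prompt_mash, steps, cfg_scale, width, height, progress)
    return images  # already a list of (img, seed) pairs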
@@ -628,7 +638,11 @@ with gr.Blocks(css=css, delete_cache=(240, 240)) as app:
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
         inputs=[prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
-        outputs=[…
+        outputs=[
+            result, seed, progress_bar,
+            gr.Gallery(label="Generated Images"), # Display 4 images
+            gr.Markdown(label="Seeds") # Display seeds used
+        ]
     )#.then(
     #     fn=lambda x, history: update_history(x, history),
     #     inputs=[result, history_gallery],
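Note on the wiring above: in Gradio Blocks, output components are normally created in the layout and then referenced in outputs; instantiating gr.Gallery and gr.Markdown inline renders them wherever this gr.on(...) call happens to sit and leaves no other handle to them. The handler must also return one value per output component, while run_lora_multi appears to return a single list of (img, seed) pairs. A sketch of more conventional wiring — component names and placement are assumptions, and seeds are stringified so the gallery can show them as captions:

    with gr.Blocks(css=css, delete_cache=(240, 240)) as app:
        ...
        gallery = gr.Gallery(label="Generated Images", columns=2)  # assumed placement
        ...
        gr.on(
            triggers=[generate_button.click, prompt.submit],
            fn=run_lora_multi,
            inputs=[prompt, cfg_scale, steps, selected_indices, lora_scale_1,
                    lora_scale_2, randomize_seed, seed, width, height, loras_state],
            # gr.Gallery accepts a list of (image, caption) tuples, so the handler
            # can return [(img, str(seed)), ...] to surface each seed as a caption.
            outputs=[gallery],
        )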