Spaces:
Sleeping
Sleeping
Charbel Malo
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -374,6 +374,7 @@ def add_custom_lora(custom_lora, selected_indices, current_loras, gallery):
|
|
374 |
else:
|
375 |
return current_loras, gr.update(), "Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4", "Select a LoRA 5", "Select a LoRA 6", selected_indices, 1.15, 1.15, 1.15, 1.15, 1.15, 1.15, None, None, None, None, None, None
|
376 |
|
|
|
377 |
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
|
378 |
print("Generating image...")
|
379 |
pipe.to("cuda")
|
@@ -393,6 +394,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
|
|
393 |
):
|
394 |
yield img
|
395 |
|
|
|
396 |
def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
|
397 |
pipe_i2i.to("cuda")
|
398 |
generator = torch.Generator(device="cuda").manual_seed(seed)
|
@@ -412,7 +414,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
|
|
412 |
return final_image
|
413 |
|
414 |
|
415 |
-
@spaces.GPU(
|
416 |
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4, lora_scale_5, lora_scale_6, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
|
417 |
if not selected_indices:
|
418 |
raise gr.Error("You must select at least one LoRA before proceeding.")
|
@@ -715,7 +717,7 @@ with gr.Blocks(css=css, delete_cache=(60, 60)) as app:
|
|
715 |
])
|
716 |
remove_lora_1event = remove_button_1.click(
|
717 |
fn=remove_lora_1,
|
718 |
-
inputs=[
|
719 |
outputs=[
|
720 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
721 |
selected_indices,
|
@@ -725,7 +727,7 @@ with gr.Blocks(css=css, delete_cache=(60, 60)) as app:
|
|
725 |
)
|
726 |
remove_button_2.click(
|
727 |
remove_lora_2,
|
728 |
-
inputs=[
|
729 |
outputs=[
|
730 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
731 |
selected_indices,
|
@@ -735,7 +737,7 @@ with gr.Blocks(css=css, delete_cache=(60, 60)) as app:
|
|
735 |
)
|
736 |
remove_button_3.click(
|
737 |
remove_lora,
|
738 |
-
inputs=[
|
739 |
outputs=[
|
740 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
741 |
selected_indices,
|
@@ -745,7 +747,7 @@ with gr.Blocks(css=css, delete_cache=(60, 60)) as app:
|
|
745 |
)
|
746 |
remove_button_4.click(
|
747 |
remove_lora,
|
748 |
-
inputs=[
|
749 |
outputs=[
|
750 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
751 |
selected_indices,
|
@@ -765,7 +767,7 @@ with gr.Blocks(css=css, delete_cache=(60, 60)) as app:
|
|
765 |
)
|
766 |
remove_button_6.click(
|
767 |
remove_lora,
|
768 |
-
inputs=[
|
769 |
outputs=[
|
770 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
771 |
selected_indices,
|
|
|
374 |
else:
|
375 |
return current_loras, gr.update(), "Select a LoRA 1", "Select a LoRA 2", "Select a LoRA 3", "Select a LoRA 4", "Select a LoRA 5", "Select a LoRA 6", selected_indices, 1.15, 1.15, 1.15, 1.15, 1.15, 1.15, None, None, None, None, None, None
|
376 |
|
377 |
+
@spaces.GPU(duration=75)
|
378 |
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
|
379 |
print("Generating image...")
|
380 |
pipe.to("cuda")
|
|
|
394 |
):
|
395 |
yield img
|
396 |
|
397 |
+
@spaces.GPU()
|
398 |
def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
|
399 |
pipe_i2i.to("cuda")
|
400 |
generator = torch.Generator(device="cuda").manual_seed(seed)
|
|
|
414 |
return final_image
|
415 |
|
416 |
|
417 |
+
@spaces.GPU()
|
418 |
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4, lora_scale_5, lora_scale_6, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
|
419 |
if not selected_indices:
|
420 |
raise gr.Error("You must select at least one LoRA before proceeding.")
|
|
|
717 |
])
|
718 |
remove_lora_1event = remove_button_1.click(
|
719 |
fn=remove_lora_1,
|
720 |
+
inputs=[selected_indices, loras_state],
|
721 |
outputs=[
|
722 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
723 |
selected_indices,
|
|
|
727 |
)
|
728 |
remove_button_2.click(
|
729 |
remove_lora_2,
|
730 |
+
inputs=[selected_indices, loras_state],
|
731 |
outputs=[
|
732 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
733 |
selected_indices,
|
|
|
737 |
)
|
738 |
remove_button_3.click(
|
739 |
remove_lora,
|
740 |
+
inputs=[selected_indices, loras_state],
|
741 |
outputs=[
|
742 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
743 |
selected_indices,
|
|
|
747 |
)
|
748 |
remove_button_4.click(
|
749 |
remove_lora,
|
750 |
+
inputs=[selected_indices, loras_state],
|
751 |
outputs=[
|
752 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
753 |
selected_indices,
|
|
|
767 |
)
|
768 |
remove_button_6.click(
|
769 |
remove_lora,
|
770 |
+
inputs=[selected_indices, loras_state],
|
771 |
outputs=[
|
772 |
selected_info_1, selected_info_2, selected_info_3, selected_info_4, selected_info_5, selected_info_6,
|
773 |
selected_indices,
|