Charbel Malo committed: Update app.py
app.py
CHANGED
@@ -270,7 +270,7 @@ def remove_custom_lora(selected_indices, current_loras, gallery):
         lora_image_2
     )
 
-@spaces.GPU(
+@spaces.GPU()
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
     print("Generating image...")
     pipe.to("cuda")
@@ -290,7 +290,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
     ):
         yield img
 
-@spaces.GPU(
+@spaces.GPU()
 def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
     pipe_i2i.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
@@ -309,6 +309,7 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
     ).images[0]
     return final_image
 
+@spaces.GPU(duration=75)
 def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     if not selected_indices:
         raise gr.Error("You must select at least one LoRA before proceeding.")
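The decorator changes above adjust how the Space requests ZeroGPU time: generate_image and generate_image_to_image are now decorated with a bare @spaces.GPU(), which uses the default allocation, while run_lora, which also loads the LoRA weights, gets an explicit @spaces.GPU(duration=75). A minimal sketch of the pattern, assuming a ZeroGPU Space with a diffusers pipeline; the model id and function below are placeholders, not code from app.py:

import spaces
import torch
from diffusers import DiffusionPipeline

# Placeholder pipeline; app.py builds its own `pipe` / `pipe_i2i`.
pipe = DiffusionPipeline.from_pretrained("some/model-id", torch_dtype=torch.bfloat16)

@spaces.GPU(duration=75)  # request a GPU for up to ~75 s per call
def generate(prompt: str):
    pipe.to("cuda")                       # move the pipeline onto the allocated GPU
    return pipe(prompt=prompt).images[0]  # run inference while the GPU is attached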
@@ -340,20 +341,20 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
     for idx, lora in enumerate(selected_loras):
         lora_name = f"lora_{idx}"
         lora_names.append(lora_name)
+        print(f"Lora Name: {lora_name}")
         lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2)
         lora_path = lora['repo']
         weight_name = lora.get("weights")
         print(f"Lora Path: {lora_path}")
-        if image_input is not None
-
-
-        else
-
-
-
-
-
-            pipe.load_lora_weights(lora_path, low_cpu_mem_usage=True, adapter_name=lora_name)
+        pipe_to_use = pipe_i2i if image_input is not None else pipe
+        pipe_to_use.load_lora_weights(
+            lora_path,
+            weight_name=weight_name if weight_name else None,
+            low_cpu_mem_usage=True,
+            adapter_name=lora_name
+        )
+        # if image_input is not None: pipe_i2i = pipe_to_use
+        # else: pipe = pipe_to_use
     print("Loaded LoRAs:", lora_names)
     print("Adapter weights:", lora_weights)
     if image_input is not None:
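The rewritten block routes every LoRA through whichever pipeline will actually run (pipe_i2i when an input image is given, otherwise pipe) and registers each one under a unique adapter name. A minimal sketch of that load-then-activate pattern; the placeholder pipelines, repo ids, and scales below stand in for app.py's state, and the final set_adapters() call is the usual follow-up step, assumed here rather than taken from this diff:

import torch
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image

# Placeholder pipelines standing in for app.py's `pipe` and `pipe_i2i`.
pipe = AutoPipelineForText2Image.from_pretrained("some/model-id", torch_dtype=torch.bfloat16)
pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe)

# Placeholder inputs; app.py derives these from the UI state.
selected_loras = [{"repo": "some-user/some-lora"}]
image_input = None
lora_scale_1, lora_scale_2 = 1.15, 1.15

lora_names, lora_weights = [], []
for idx, lora in enumerate(selected_loras):
    lora_name = f"lora_{idx}"
    lora_names.append(lora_name)
    lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2)

    # Load onto whichever pipeline will actually serve this request.
    pipe_to_use = pipe_i2i if image_input is not None else pipe
    pipe_to_use.load_lora_weights(
        lora["repo"],
        weight_name=lora.get("weights") or None,  # optional filename inside the repo
        low_cpu_mem_usage=True,
        adapter_name=lora_name,                   # unique name so adapters can be mixed
    )

# Assumed follow-up (not shown in this diff): activate the adapters with their scales.
pipe_to_use.set_adapters(lora_names, adapter_weights=lora_weights)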
@@ -463,7 +464,7 @@ css = '''
 #component-11{align-self: stretch;}
 '''
 
-with gr.Blocks(css=css, delete_cache=(60, 3600)) as app:
+with gr.Blocks(css=css, delete_cache=(60, 60)) as app:
     title = gr.HTML(
         """<h1><img src="https://i.imgur.com/wMh2Oek.png" alt="LoRA"> LoRA Lab [beta]</h1><br><span style="
         margin-top: -25px !important;
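The delete_cache change shortens how long generated files stay in Gradio's temporary cache: the parameter is a (frequency, age) pair in seconds, so (60, 60) removes files older than a minute once per minute, instead of keeping them for up to an hour as (60, 3600) did. A small self-contained sketch of the setting, with placeholder demo content:

import gradio as gr

# delete_cache=(frequency, age): every `frequency` seconds, delete cached files
# created by this app that are older than `age` seconds.
with gr.Blocks(delete_cache=(60, 60)) as demo:
    gr.Markdown("Cached files are cleaned up every minute.")

if __name__ == "__main__":
    demo.launch()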
@@ -515,18 +516,20 @@ with gr.Blocks(css=css, delete_cache=(60, 3600)) as app:
                 label="Or pick from the LoRA Explorer gallery",
                 allow_preview=False,
                 columns=5,
-                elem_id="gallery"
+                elem_id="gallery",
+                show_share_button=False,
+                interactive=False
             )
         with gr.Column():
             progress_bar = gr.Markdown(elem_id="progress", visible=False)
-            result = gr.Image(label="Generated Image", interactive=False)
+            result = gr.Image(label="Generated Image", interactive=False, show_share_button=False)
             with gr.Accordion("History", open=False):
                 history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
 
     with gr.Row():
         with gr.Accordion("Advanced Settings", open=False):
             with gr.Row():
-                input_image = gr.Image(label="Input image", type="filepath")
+                input_image = gr.Image(label="Input image", type="filepath", show_share_button=False)
                 image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
             with gr.Column():
                 with gr.Row():
@@ -575,7 +578,7 @@ with gr.Blocks(css=css, delete_cache=(60, 3600)) as app:
         fn=run_lora,
         inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
         outputs=[result, seed, progress_bar]
-    ).then(
+    ).then(
         fn=lambda x, history: update_history(x, history),
         inputs=[result, history_gallery],
         outputs=history_gallery,
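The final hunk touches the event chain that follows run_lora: in Gradio, .then() registers a second callback that runs only after the first one completes, which is how each new result lands in history_gallery. A self-contained sketch of that chaining pattern; the stub functions and components below are placeholders, not app.py's real ones:

import gradio as gr
from PIL import Image

def fake_generate(prompt):
    # Stand-in for run_lora: return a solid placeholder image.
    return Image.new("RGB", (64, 64), "gray")

def update_history(new_image, history):
    history = history or []
    history.insert(0, new_image)  # newest result first
    return history

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Image(label="Generated Image")
    history_gallery = gr.Gallery(label="History", columns=6)
    generate_button = gr.Button("Generate")

    generate_button.click(
        fn=fake_generate,
        inputs=[prompt],
        outputs=[result],
    ).then(  # runs only after the click handler has finished
        fn=update_history,
        inputs=[result, history_gallery],
        outputs=history_gallery,
    )

if __name__ == "__main__":
    demo.launch()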