Upload 2 files
app.py CHANGED
@@ -18,7 +18,7 @@ import pandas as pd
 import numpy as np
 from pathlib import Path
 
-from env import models, models_dev, models_schnell, models_fill, models_canny, models_depth, models_edit, num_loras, num_cns, HF_TOKEN, single_file_base_models
+from env import models, models_dev, models_schnell, models_fill, models_canny, models_depth, models_edit, num_loras, num_cns, MAX_LORA, HF_TOKEN, single_file_base_models
 from mod import (clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists, get_model_trigger,
                  description_ui, compose_lora_json, is_valid_lora, fuse_loras, turbo_loras, save_image, preprocess_i2i_image,
                  get_trigger_word, enhance_prompt, set_control_union_image, get_canny_image, get_depth_image,
@@ -236,93 +236,52 @@ def download_file(url, directory=None):
 
     return filepath
 
+def get_lora_selected_info(selected_indices, current_loras):
+    selected_info = [f"Select a LoRA {int(i+1)}" for i in range(MAX_LORA)]
+    lora_scale = [1.15] * MAX_LORA
+    lora_image = [None] * MAX_LORA
+    for i in range(MAX_LORA):
+        if len(selected_indices) >= i + 1:
+            lora = current_loras[selected_indices[i]]
+            selected_info[i] = f"### LoRA {i+1} Selected: [{lora['title']}]({lora['repo']}) ✨"
+            lora_image[i] = lora['image'] if lora['image'] else None
+    return selected_info, lora_scale, lora_image
+
 def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
+    updates = [gr.update()] * MAX_LORA
     selected_index = evt.index
     selected_indices = selected_indices or []
     if selected_index in selected_indices:
         selected_indices.remove(selected_index)
     else:
-        if len(selected_indices) < 2:
+        if len(selected_indices) < MAX_LORA:
             selected_indices.append(selected_index)
         else:
-            gr.Warning("You can select up to 2 LoRAs, remove one to select a new one.")
-            return gr.update(),
-
-    selected_info_1 = "Select a LoRA 1"
-    selected_info_2 = "Select a LoRA 2"
-    lora_scale_1 = 1.15
-    lora_scale_2 = 1.15
-    lora_image_1 = None
-    lora_image_2 = None
-    if len(selected_indices) >= 1:
-        lora1 = loras_state[selected_indices[0]]
-        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
-        lora_image_1 = lora1['image']
-    if len(selected_indices) >= 2:
-        lora2 = loras_state[selected_indices[1]]
-        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
-        lora_image_2 = lora2['image']
+            gr.Warning(f"You can select up to {MAX_LORA} LoRAs, remove one to select a new one.")
+            return gr.update(), *updates, selected_indices, *updates, width, height, *updates
 
+    selected_info, lora_scale, lora_image = get_lora_selected_info(selected_indices, loras_state)
     if selected_indices:
         last_selected_lora = loras_state[selected_indices[-1]]
         new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
     else:
         new_placeholder = "Type a prompt"
 
-    return gr.update(placeholder=new_placeholder),
-
-def remove_lora_1(selected_indices, loras_state):
-    if len(selected_indices) >= 1:
-        selected_indices.pop(0)
-    selected_info_1 = "Select LoRA 1"
-    selected_info_2 = "Select LoRA 2"
-    lora_scale_1 = 1.15
-    lora_scale_2 = 1.15
-    lora_image_1 = None
-    lora_image_2 = None
-    if len(selected_indices) >= 1:
-        lora1 = loras_state[selected_indices[0]]
-        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
-        lora_image_1 = lora1['image']
-    if len(selected_indices) >= 2:
-        lora2 = loras_state[selected_indices[1]]
-        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
-        lora_image_2 = lora2['image']
-    return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2
-
-def remove_lora_2(selected_indices, loras_state):
-    if len(selected_indices) >= 2:
-        selected_indices.pop(1)
-    selected_info_1 = "Select LoRA 1"
-    selected_info_2 = "Select LoRA 2"
-    lora_scale_1 = 1.15
-    lora_scale_2 = 1.15
-    lora_image_1 = None
-    lora_image_2 = None
-    if len(selected_indices) >= 1:
-        lora1 = loras_state[selected_indices[0]]
-        selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
-        lora_image_1 = lora1['image']
-    if len(selected_indices) >= 2:
-        lora2 = loras_state[selected_indices[1]]
-        selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
-        lora_image_2 = lora2['image']
-    return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2
+    return gr.update(placeholder=new_placeholder), *selected_info, selected_indices, *lora_scale, width, height, *lora_image
+
+def remove_lora(selected_indices, loras_state, n: int):
+    if len(selected_indices) >= n + 1:
+        selected_indices.pop(n)
+    selected_info, lora_scale, lora_image = get_lora_selected_info(selected_indices, loras_state)
+    return *selected_info, selected_indices, *lora_scale, *lora_image
 
 def randomize_loras(selected_indices, loras_state):
-    if len(loras_state) < 2:
+    if len(loras_state) < MAX_LORA:
         raise gr.Error("Not enough LoRAs to randomize.")
-    selected_indices = random.sample(range(len(loras_state)), 2)
-    lora1 = loras_state[selected_indices[0]]
-    lora2 = loras_state[selected_indices[1]]
-    selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
-    selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
-    lora_scale_1 = 1.15
-    lora_scale_2 = 1.15
-    lora_image_1 = lora1['image']
-    lora_image_2 = lora2['image']
+    selected_indices = random.sample(range(len(loras_state)), MAX_LORA)
     random_prompt = random.choice(prompt_values)
-    return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, random_prompt
+    selected_info, lora_scale, lora_image = get_lora_selected_info(selected_indices, loras_state)
+    return *selected_info, selected_indices, *lora_scale, *lora_image, random_prompt
 
 def download_loras_images(loras_json_orig: list[dict]):
     api = HfApi(token=HF_TOKEN)
@@ -348,6 +307,7 @@ def download_loras_images(loras_json_orig: list[dict]):
     return loras_json
 
 def add_custom_lora(custom_lora, selected_indices, current_loras, gallery):
+    updates = [gr.update()] * MAX_LORA
     if custom_lora:
         try:
             title, repo, path, trigger_word, image = check_custom_model(custom_lora)
@@ -377,44 +337,28 @@ def add_custom_lora(custom_lora, selected_indices, current_loras, gallery):
             # Update gallery
             gallery_items = [(item["image"], item["title"]) for item in current_loras]
             # Update selected_indices if there's room
-            if len(selected_indices) < 2:
+            if len(selected_indices) < MAX_LORA:
                 selected_indices.append(existing_item_index)
             else:
-                gr.Warning("You can select up to 2 LoRAs, remove one to select a new one.")
+                gr.Warning(f"You can select up to {MAX_LORA} LoRAs, remove one to select a new one.")
 
             # Update selected_info and images
-            selected_info_1 = "Select a LoRA 1"
-            selected_info_2 = "Select a LoRA 2"
-            lora_scale_1 = 1.15
-            lora_scale_2 = 1.15
-            lora_image_1 = None
-            lora_image_2 = None
-            if len(selected_indices) >= 1:
-                lora1 = current_loras[selected_indices[0]]
-                selected_info_1 = f"### LoRA 1 Selected: {lora1['title']} ✨"
-                lora_image_1 = lora1['image'] if lora1['image'] else None
-            if len(selected_indices) >= 2:
-                lora2 = current_loras[selected_indices[1]]
-                selected_info_2 = f"### LoRA 2 Selected: {lora2['title']} ✨"
-                lora_image_2 = lora2['image'] if lora2['image'] else None
+            selected_info, lora_scale, lora_image = get_lora_selected_info(selected_indices, current_loras)
             print("Finished adding custom LoRA")
             return (
                 current_loras,
                 gr.update(value=gallery_items),
-                selected_info_1,
-                selected_info_2,
+                *selected_info,
                 selected_indices,
-                lora_scale_1,
-                lora_scale_2,
-                lora_image_1,
-                lora_image_2
+                *lora_scale,
+                *lora_image
             )
         except Exception as e:
            print(e)
            gr.Warning(str(e))
-            return current_loras, gr.update(),
+            return current_loras, gr.update(), *updates, selected_indices, *updates, *updates
     else:
-        return current_loras, gr.update(),
+        return current_loras, gr.update(), *updates, selected_indices, *updates, *updates
 
 def remove_custom_lora(selected_indices, current_loras, gallery):
     if current_loras:
@@ -428,30 +372,14 @@ def remove_custom_lora(selected_indices, current_loras, gallery):
         # Update gallery
         gallery_items = [(item["image"], item["title"]) for item in current_loras]
         # Update selected_info and images
-        selected_info_1 = "Select a LoRA 1"
-        selected_info_2 = "Select a LoRA 2"
-        lora_scale_1 = 1.15
-        lora_scale_2 = 1.15
-        lora_image_1 = None
-        lora_image_2 = None
-        if len(selected_indices) >= 1:
-            lora1 = current_loras[selected_indices[0]]
-            selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
-            lora_image_1 = lora1['image']
-        if len(selected_indices) >= 2:
-            lora2 = current_loras[selected_indices[1]]
-            selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
-            lora_image_2 = lora2['image']
+        selected_info, lora_scale, lora_image = get_lora_selected_info(selected_indices, current_loras)
         return (
             current_loras,
             gr.update(value=gallery_items),
-            selected_info_1,
-            selected_info_2,
+            *selected_info,
            selected_indices,
-            lora_scale_1,
-            lora_scale_2,
-            lora_image_1,
-            lora_image_2
+            *lora_scale,
+            *lora_image
         )
 
 @spaces.GPU(duration=70)
@@ -603,7 +531,7 @@ def generate_image_to_image(prompt_mash: str, image_input_path_dict: dict, image
         raise gr.Error(f"I2I Inference Error: {e}") from e
 
 def run_lora(prompt: str, image_input: dict, image_strength: float, task_type: str, turbo_mode: str, blur_mask: bool, blur_factor: float,
-             cfg_scale: float, steps: int, selected_indices,
+             cfg_scale: float, steps: int, selected_indices, lora_scale: list[float],
              randomize_seed: bool, seed: int, width: int, height: int, sigmas_factor: float, loras_state,
              lora_json, cn_on: bool, translate_on: bool, progress=gr.Progress(track_tqdm=True)):
     global pipe, pipe_i2i
@@ -665,7 +593,8 @@ def run_lora(prompt: str, image_input: dict, image_strength: float, task_type: s
             lora_name = f"lora_{idx}"
             lora_names.append(lora_name)
             print(f"Lora Name: {lora_name}")
-            lora_weights.append(
+            lora_weights.append(lora_scale[idx])
+            print(f"Lora Weight: {lora_scale[idx]}")
             lora_path = lora['repo']
             weight_name = lora.get("weights")
             print(f"Lora Path: {lora_path}")
@@ -809,7 +738,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
         elem_id="title",
     )
     loras_state = gr.State(loras)
+    loras_scale = gr.State([1.15] * MAX_LORA)
    selected_indices = gr.State([])
+    num = [gr.State(value=i) for i in range(MAX_LORA)]
    with gr.Row():
        with gr.Column(scale=3):
            with gr.Group():
@@ -833,29 +764,24 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
            with gr.Row(elem_id="loaded_loras"):
                with gr.Column(scale=1, min_width=25):
                    randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
-
-
-
-
-
-
-                with gr.
-
-
-
-
-
-
-
-
-                            selected_info_2 = gr.Markdown("Select a LoRA 2")
-                        with gr.Column(scale=5, min_width=50):
-                            lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
-                            with gr.Row():
-                                remove_button_2 = gr.Button("Remove", size="sm")
+                lora_image = [None] * MAX_LORA
+                selected_info = [None] * MAX_LORA
+                lora_scale = [None] * MAX_LORA
+                remove_button = [None] * MAX_LORA
+                for i in range(MAX_LORA):
+                    with gr.Column(scale=8):
+                        with gr.Row():
+                            with gr.Column(scale=0, min_width=50):
+                                lora_image[i] = gr.Image(label=f"LoRA {int(i+1)} Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
+                            with gr.Column(scale=3, min_width=100):
+                                selected_info[i] = gr.Markdown(f"Select a LoRA {int(i+1)}")
+                            with gr.Column(scale=5, min_width=50):
+                                lora_scale[i] = gr.Slider(label=f"LoRA {int(i+1)} Scale", minimum=0, maximum=3, step=0.01, value=1.15)
+                                with gr.Row():
+                                    remove_button[i] = gr.Button("Remove", size="sm")
            with gr.Row():
                with gr.Column():
-                    selected_info = gr.Markdown("")
+                    #selected_info = gr.Markdown("")
                    gallery = gr.Gallery([(item["image"], item["title"]) for item in loras], label="LoRA Gallery", allow_preview=False,
                                         columns=4, elem_id="gallery", show_share_button=False, interactive=False)
            with gr.Group():
@@ -982,31 +908,29 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
        gallery.select(
            update_selection,
            inputs=[selected_indices, loras_state, width, height],
-            outputs=[prompt,
-
-
-
-
-
-
-
-
-            outputs=[selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2]
-        )
+            outputs=[prompt, *selected_info, selected_indices, *lora_scale, width, height, *lora_image])
+        for i in range(MAX_LORA):
+            remove_button[i].click(
+                remove_lora,
+                inputs=[selected_indices, loras_state, num[i]],
+                outputs=[*selected_info, selected_indices, *lora_scale, *lora_image]
+            )
+            lora_scale[i].change(lambda n, l, s: [s if i == n else x for i, x in enumerate(l)], [num[i], loras_scale, lora_scale[i]], [loras_scale])
+
        randomize_button.click(
            randomize_loras,
            inputs=[selected_indices, loras_state],
-            outputs=[
+            outputs=[*selected_info, selected_indices, *lora_scale, *lora_image, prompt]
        )
        add_custom_lora_button.click(
            add_custom_lora,
            inputs=[custom_lora, selected_indices, loras_state, gallery],
-            outputs=[loras_state, gallery,
+            outputs=[loras_state, gallery, *selected_info, selected_indices, *lora_scale, *lora_image]
        )
        remove_custom_lora_button.click(
            remove_custom_lora,
            inputs=[selected_indices, loras_state, gallery],
-            outputs=[loras_state, gallery,
+            outputs=[loras_state, gallery, *selected_info, selected_indices, *lora_scale, *lora_image]
        )
        gr.on(
            triggers=[generate_button.click, prompt.submit],
@@ -1018,7 +942,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
            trigger_mode="once",
        ).success(
            fn=run_lora,
-            inputs=[prompt, input_image, image_strength, task_type, turbo_mode, blur_mask, blur_factor, cfg_scale, steps, selected_indices,
+            inputs=[prompt, input_image, image_strength, task_type, turbo_mode, blur_mask, blur_factor, cfg_scale, steps, selected_indices, loras_scale,
                randomize_seed, seed, width, height, sigmas_factor, loras_state, lora_repo_json, cn_on, auto_trans],
            outputs=[result, seed, progress_bar],
            queue=True,
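The refactor above replaces the per-slot variables (selected_info_1, lora_scale_2, and so on) with list-valued helpers whose results are unpacked with * into the flat tuples Gradio expects for its outputs. A minimal, self-contained sketch of that pattern, assuming a toy MAX_LORA and dummy LoRA metadata; get_slot_info and remove_slot are illustrative stand-ins, not the Space's own functions:

# Sketch of the list-based slot pattern: one helper computes per-slot values,
# and callers splat them into the flat tuple that a Gradio outputs list expects.
MAX_LORA = 5

def get_slot_info(selected_indices, loras):
    selected_info = [f"Select a LoRA {i + 1}" for i in range(MAX_LORA)]
    lora_scale = [1.15] * MAX_LORA
    lora_image = [None] * MAX_LORA
    for slot, idx in enumerate(selected_indices[:MAX_LORA]):
        lora = loras[idx]
        selected_info[slot] = f"### LoRA {slot + 1} Selected: [{lora['title']}]({lora['repo']})"
        lora_image[slot] = lora.get("image")
    return selected_info, lora_scale, lora_image

def remove_slot(selected_indices, loras, n):
    # Same shape as remove_lora() in the diff: drop slot n, then rebuild every slot.
    if len(selected_indices) > n:
        selected_indices.pop(n)
    selected_info, lora_scale, lora_image = get_slot_info(selected_indices, loras)
    # One flat tuple: MAX_LORA infos, the index list, MAX_LORA scales, MAX_LORA images.
    return (*selected_info, selected_indices, *lora_scale, *lora_image)

if __name__ == "__main__":
    demo_loras = [{"title": f"demo-{i}", "repo": f"user/demo-{i}", "image": None} for i in range(6)]
    print(remove_slot([0, 2, 4], demo_loras, 1))

Because each group in the corresponding Gradio outputs list also has MAX_LORA entries, the tuple length always matches, which is what lets a single remove_lora handler serve every Remove button.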
env.py CHANGED
@@ -6,7 +6,8 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
 HF_READ_TOKEN = os.environ.get('HF_READ_TOKEN') # only use for private repo
 
 
-
+MAX_LORA = 5 # number of internal LoRA slots
+num_loras = 5 # number of external LoRA slots
 num_cns = 2
 
 
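With MAX_LORA defined in env.py, app.py builds the LoRA slot UI in a loop. Two details of that wiring are easy to miss: each Remove button receives its slot number through a gr.State (so the handler does not depend on the loop variable), and every per-slot slider writes back into one list-valued gr.State that run_lora later reads. A minimal standalone sketch of that idea, with a plain set_scale function standing in for the lambda used in the diff and illustrative component names:

import gradio as gr

MAX_LORA = 5  # mirrors the constant added to env.py above

def set_scale(slot, scales, value):
    # Copy the shared list and overwrite only this slot's entry.
    scales = list(scales)
    scales[slot] = value
    return scales

with gr.Blocks() as demo:
    loras_scale = gr.State([1.15] * MAX_LORA)            # single source of truth
    slot_index = [gr.State(i) for i in range(MAX_LORA)]  # fixes each slot's number
    sliders = []
    for i in range(MAX_LORA):
        sliders.append(gr.Slider(label=f"LoRA {i + 1} Scale",
                                 minimum=0, maximum=3, step=0.01, value=1.15))
    for i in range(MAX_LORA):
        # Each slider change writes its value into the shared list State.
        sliders[i].change(set_scale,
                          inputs=[slot_index[i], loras_scale, sliders[i]],
                          outputs=[loras_scale])

if __name__ == "__main__":
    demo.launch()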