Upload 3 files

- app.py +62 -99
- modutils.py +153 -170
app.py
CHANGED
@@ -162,16 +162,34 @@ def process_string(input_string):
 
 ## BEGIN MOD
 from modutils import (
+    list_uniq,
     download_private_repo,
-    get_local_model_list,
     get_model_id_list,
-    escape_lora_basename,
-    list_uniq,
-    list_sub,
     get_tupled_embed_list,
-    update_lora_dict,
     get_lora_model_list,
     get_all_lora_tupled_list,
+    update_loras,
+    apply_lora_prompt,
+    set_prompt_loras,
+    get_my_lora,
+    upload_file_lora,
+    move_file_lora,
+    search_civitai_lora,
+    select_civitai_lora,
+    set_textual_inversion_prompt,
+    get_model_pipeline,
+    change_interface_mode,
+    get_t2i_model_info,
+    get_tupled_model_list,
+    save_gallery_images,
+    set_optimization,
+    set_sampler_settings,
+    set_quick_presets,
+    process_style_prompt,
+    optimization_list,
+    preset_styles,
+    preset_quality,
+    preset_sampler_setting,
 )
 from env import (
     hf_token,
@@ -237,35 +255,6 @@ embed_sdxl_list = get_model_list(directory_embeds_sdxl) + get_model_list(directo
 
 def get_embed_list(pipeline_name):
     return get_tupled_embed_list(embed_sdxl_list if pipeline_name == "StableDiffusionXLPipeline" else embed_list)
-
-def get_my_lora(link_url):
-    from pathlib import Path
-    before = get_local_model_list(directory_loras)
-    for url in [url.strip() for url in link_url.split(',')]:
-        if not Path(f"{directory_loras}/{url.split('/')[-1]}").exists():
-            download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
-    after = get_local_model_list(directory_loras)
-    new_files = list_sub(after, before)
-    for file in new_files:
-        path = Path(file)
-        if path.exists():
-            new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
-            path.resolve().rename(new_path.resolve())
-            update_lora_dict(str(new_path))
-    new_lora_model_list = get_lora_model_list()
-    new_lora_tupled_list = get_all_lora_tupled_list()
-
-    return gr.update(
-        choices=new_lora_tupled_list, value=new_lora_model_list[-1]
-    ), gr.update(
-        choices=new_lora_tupled_list
-    ), gr.update(
-        choices=new_lora_tupled_list
-    ), gr.update(
-        choices=new_lora_tupled_list
-    ), gr.update(
-        choices=new_lora_tupled_list
-    )
 ## END MOD
 
 print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
@@ -352,7 +341,6 @@ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transfo
 from stablepy import logger
 logger.setLevel(logging.CRITICAL)
 
-
 from v2 import (
     V2UI,
     parse_upsampling_output,
@@ -376,29 +364,6 @@ from tagger import (
     translate_prompt,
     select_random_character,
 )
-from modutils import (
-    change_interface_mode,
-    get_t2i_model_info,
-    get_tupled_model_list,
-    save_gallery_images,
-    upload_file_lora,
-    move_file_lora,
-    set_lora_trigger,
-    set_lora_prompt,
-    apply_lora_prompt,
-    search_civitai_lora,
-    select_civitai_lora,
-    set_textual_inversion_prompt,
-    get_model_pipeline,
-    set_optimization,
-    set_sampler_settings,
-    process_style_prompt,
-    optimization_list,
-    preset_styles,
-    preset_quality,
-    preset_sampler_setting,
-    set_quick_presets,
-)
 def description_ui():
     gr.Markdown(
         """
@@ -584,9 +549,12 @@ class GuiSD:
         msg_lora = []
 
         ## BEGIN MOD
-        prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
         global lora_model_list
         lora_model_list = get_lora_model_list()
+        lora1, lora_scale1, lora2, lora_scale2, lora3, lora_scale3, lora4, lora_scale4, lora5, lora_scale5 = \
+            set_prompt_loras(prompt, syntax_weights, lora1, lora_scale1, lora2, lora_scale2, lora3,
+                             lora_scale3, lora4, lora_scale4, lora5, lora_scale5)
+        prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
        ## END MOD
 
         if model_name in model_list:
@@ -1032,40 +1000,40 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", elem_id="main", css=CSS) as app:
         hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
 
         with gr.Accordion("LoRA", open=False, visible=True) as menu_lora:
-            lora1_gui = gr.Dropdown(label="…
-            lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="…
+            lora1_gui = gr.Dropdown(label="LoRA1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
+            lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA Scale 1")
             with gr.Row():
                 with gr.Group():
-                    […]
-                    […]
+                    lora1_info_gui = gr.Textbox(label="LoRA1 prompts", info="Example of prompt:", value="None", show_copy_button=True, interactive=False, visible=False)
+                    lora1_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
                     lora1_desc_gui = gr.Markdown(value="", visible=False)
-            lora2_gui = gr.Dropdown(label="…
-            lora_scale_2_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="…
+            lora2_gui = gr.Dropdown(label="LoRA2", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
+            lora_scale_2_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA Scale 2")
             with gr.Row():
                 with gr.Group():
-                    […]
-                    […]
+                    lora2_info_gui = gr.Textbox(label="LoRA2 prompts", info="Example of prompt:", value="None", show_copy_button=True, interactive=False, visible=False)
+                    lora2_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
                     lora2_desc_gui = gr.Markdown(value="", visible=False)
-            lora3_gui = gr.Dropdown(label="…
-            lora_scale_3_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="…
+            lora3_gui = gr.Dropdown(label="LoRA3", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
+            lora_scale_3_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA Scale 3")
             with gr.Row():
                 with gr.Group():
-                    […]
-                    […]
+                    lora3_info_gui = gr.Textbox(label="LoRA3 prompts", info="Example of prompt:", value="None", show_copy_button=True, interactive=False, visible=False)
+                    lora3_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
                     lora3_desc_gui = gr.Markdown(value="", visible=False)
-            lora4_gui = gr.Dropdown(label="…
-            lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="…
+            lora4_gui = gr.Dropdown(label="LoRA4", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
+            lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA Scale 4")
             with gr.Row():
                 with gr.Group():
-                    […]
-                    […]
+                    lora4_info_gui = gr.Textbox(label="LoRA4 prompts", info="Example of prompt:", value="None", show_copy_button=True, interactive=False, visible=False)
+                    lora4_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
                    lora4_desc_gui = gr.Markdown(value="", visible=False)
-            lora5_gui = gr.Dropdown(label="…
-            lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="…
+            lora5_gui = gr.Dropdown(label="LoRA5", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
+            lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA Scale 5")
             with gr.Row():
                 with gr.Group():
-                    […]
-                    […]
+                    lora5_info_gui = gr.Textbox(label="LoRA5 prompts", info="Example of prompt", value="None", show_copy_button=True, interactive=False, visible=False)
+                    lora5_copy_gui = gr.Button(value="Copy example to prompt", visible=False)
                     lora5_desc_gui = gr.Markdown(value="", visible=False)
            with gr.Accordion("From URL", open=True, visible=True):
                 with gr.Row():
@@ -1590,31 +1558,26 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", elem_id="main", css=CSS) as app:
     sampler_selector_gui.change(set_sampler_settings, [sampler_selector_gui], [sampler_gui, steps_gui, cfg_gui, clip_skip_gui, img_width_gui, img_height_gui, optimization_gui], queue=False)
     optimization_gui.change(set_optimization, [optimization_gui, steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora5_gui, lora_scale_5_gui], [steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora5_gui, lora_scale_5_gui], queue=False)
 
-    lora1_gui.change(set_lora_prompt, [prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui, lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui], [prompt_gui], queue=False)\
-    .success(set_lora_trigger, [lora1_gui], [lora1_trigger_gui, lora1_copy_button, lora1_desc_gui, lora1_gui], scroll_to_output=True, queue=False)
-    lora2_gui.change(set_lora_prompt, [prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui, lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui], [prompt_gui], queue=False)\
-    .success(set_lora_trigger, [lora2_gui], [lora2_trigger_gui, lora2_copy_button, lora2_desc_gui, lora2_gui], scroll_to_output=True, queue=False)
-    lora3_gui.change(set_lora_prompt, [prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui, lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui], [prompt_gui], queue=False)\
-    .success(set_lora_trigger, [lora3_gui], [lora3_trigger_gui, lora3_copy_button, lora3_desc_gui, lora3_gui], scroll_to_output=True, queue=False)
-    lora4_gui.change(set_lora_prompt, [prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui, lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui], [prompt_gui], queue=False)\
-    .success(set_lora_trigger, [lora4_gui], [lora4_trigger_gui, lora4_copy_button, lora4_desc_gui, lora4_gui], scroll_to_output=True, queue=False)
-    lora5_gui.change(set_lora_prompt, [prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui, lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui], [prompt_gui], queue=False)\
-    .success(set_lora_trigger, [lora5_gui], [lora5_trigger_gui, lora5_copy_button, lora5_desc_gui, lora5_gui], scroll_to_output=True, queue=False)
     gr.on(
-        triggers=[lora_scale_1_gui.change,
-        […]
-        […]
+        triggers=[lora1_gui.change, lora_scale_1_gui.change, lora2_gui.change, lora_scale_2_gui.change,
+                  lora3_gui.change, lora_scale_3_gui.change, lora4_gui.change, lora_scale_4_gui.change,
+                  lora5_gui.change, lora_scale_5_gui.change, prompt_syntax_gui.change],
+        fn=update_loras,
         inputs=[prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui,
                 lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui],
-        outputs=[prompt_gui…
-        […]
+        outputs=[prompt_gui, lora1_gui, lora_scale_1_gui, lora1_info_gui, lora1_copy_gui, lora1_desc_gui,
+                 lora2_gui, lora_scale_2_gui, lora2_info_gui, lora2_copy_gui, lora2_desc_gui,
+                 lora3_gui, lora_scale_3_gui, lora3_info_gui, lora3_copy_gui, lora3_desc_gui,
+                 lora4_gui, lora_scale_4_gui, lora4_info_gui, lora4_copy_gui, lora4_desc_gui,
+                 lora5_gui, lora_scale_5_gui, lora5_info_gui, lora5_copy_gui, lora5_desc_gui],
         queue=False,
+        trigger_mode="once",
     )
-    […]
-    […]
-    […]
-    […]
-    […]
+    lora1_copy_gui.click(apply_lora_prompt, [prompt_gui, lora1_info_gui], [prompt_gui], queue=False)
+    lora2_copy_gui.click(apply_lora_prompt, [prompt_gui, lora2_info_gui], [prompt_gui], queue=False)
+    lora3_copy_gui.click(apply_lora_prompt, [prompt_gui, lora3_info_gui], [prompt_gui], queue=False)
+    lora4_copy_gui.click(apply_lora_prompt, [prompt_gui, lora4_info_gui], [prompt_gui], queue=False)
+    lora5_copy_gui.click(apply_lora_prompt, [prompt_gui, lora5_info_gui], [prompt_gui], queue=False)
     gr.on(
         triggers=[search_civitai_button_lora.click, search_civitai_query_lora.submit],
         fn=search_civitai_lora,
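The five per-dropdown `.change(...).success(...)` chains collapse into a single `gr.on` registration above. Below is a minimal, self-contained sketch of that fan-in pattern, with hypothetical component names rather than the Space's real ones:

```python
import gradio as gr

def sync(prompt, lora, scale):
    # one handler sees every wired input at once
    return f"{prompt} <lora:{lora}:{scale:.2f}>"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    lora = gr.Dropdown(["detail", "style"], value="detail", label="LoRA")
    scale = gr.Slider(-2, 2, value=1.0, step=0.01, label="Scale")
    out = gr.Textbox(label="Result")
    gr.on(
        triggers=[lora.change, scale.change],  # one registration, many triggers
        fn=sync,
        inputs=[prompt, lora, scale],
        outputs=[out],
        queue=False,
        trigger_mode="once",  # ignore re-triggers while a call is pending
    )

demo.launch()
```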
modutils.py
CHANGED
@@ -296,37 +296,6 @@ def get_private_lora_model_lists():
 private_lora_model_list = get_private_lora_model_lists()
 
 
-def set_lora_prompt(prompt_gui, prompt_syntax_gui, lora1_gui, lora_scale_1_gui, lora2_gui, lora_scale_2_gui,\
-                    lora3_gui, lora_scale_3_gui, lora4_gui, lora_scale_4_gui, lora5_gui, lora_scale_5_gui):
-    import os
-    if not "Classic" in str(prompt_syntax_gui): return prompt_gui
-    loras = []
-    if lora1_gui and lora1_gui != "None":
-        basename = os.path.splitext(os.path.basename(lora1_gui))[0]
-        loras.append(f"<lora:{basename}:{lora_scale_1_gui:.2f}>")
-    if lora2_gui and lora2_gui != "None":
-        basename = os.path.splitext(os.path.basename(lora2_gui))[0]
-        loras.append(f"<lora:{basename}:{lora_scale_2_gui:.2f}>")
-    if lora3_gui and lora3_gui != "None":
-        basename = os.path.splitext(os.path.basename(lora3_gui))[0]
-        loras.append(f"<lora:{basename}:{lora_scale_3_gui:.2f}>")
-    if lora4_gui and lora4_gui != "None":
-        basename = os.path.splitext(os.path.basename(lora4_gui))[0]
-        loras.append(f"<lora:{basename}:{lora_scale_4_gui:.2f}>")
-    if lora5_gui and lora5_gui != "None":
-        basename = os.path.splitext(os.path.basename(lora5_gui))[0]
-        loras.append(f"<lora:{basename}:{lora_scale_5_gui:.2f}>")
-    tags = prompt_gui.split(",") if prompt_gui else []
-    prompts = []
-    for tag in tags:
-        tag = str(tag).strip()
-        if tag and not "<lora" in tag:
-            prompts.append(tag)
-    empty = [""]
-    prompt = ", ".join(prompts + loras + empty)
-    return gr.update(value=prompt)
-
-
 def get_civitai_info(path):
     global civitai_not_exists_list
     import requests
@@ -511,8 +480,9 @@ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
     return wt
 
 
-def set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+def set_prompt_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
     import re
+    if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
     lora1 = get_valid_lora_name(lora1)
     lora2 = get_valid_lora_name(lora2)
     lora3 = get_valid_lora_name(lora3)
@@ -530,7 +500,6 @@ def set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
     on4, label4, tag4, md4 = get_lora_info(lora4)
     on5, label5, tag5, md5 = get_lora_info(lora5)
     lora_paths = [lora1, lora2, lora3, lora4, lora5]
-
     prompts = prompt.split(",") if prompt else []
     for p in prompts:
         p = str(p).strip()
@@ -570,7 +539,6 @@ def set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
                 lora5_wt = safe_float(wt)
                 on5 = True
-
     return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
 
 
@@ -614,7 +582,7 @@ def normalize_prompt_list(tags: list[str]):
             prompts.append(tag)
     return prompts
 
-
+
 def apply_lora_prompt(prompt: str, lora_info: str):
     if lora_info == "None": return gr.update(value=prompt)
     tags = prompt.split(",") if prompt else []
@@ -627,9 +595,9 @@ def apply_lora_prompt(prompt: str, lora_info: str):
     empty = [""]
     prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
     return gr.update(value=prompt)
-'''
 
-[…]
+
+def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
     import re
     on1, label1, tag1, md1 = get_lora_info(lora1)
     on2, label2, tag2, md2 = get_lora_info(lora2)
@@ -638,59 +606,60 @@ def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora
     on5, label5, tag5, md5 = get_lora_info(lora5)
     lora_paths = [lora1, lora2, lora3, lora4, lora5]
 
-[… 53 lines of the old update_loras body, not captured by the diff view …]
+    output_prompt = prompt
+    if "Classic" in str(prompt_syntax):
+        prompts = prompt.split(",") if prompt else []
+        output_prompts = []
+        for p in prompts:
+            p = str(p).strip()
+            if "<lora" in p:
+                result = re.findall(r'<lora:(.+?):(.+?)>', p)
+                if not result: continue
+                key = result[0][0]
+                wt = result[0][1]
+                path = to_lora_path(key)
+                if not key in loras_dict.keys() or not path: continue
+                if path in lora_paths:
+                    output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
+                elif not on1:
+                    lora1 = path
+                    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                    lora1_wt = safe_float(wt)
+                    on1, label1, tag1, md1 = get_lora_info(lora1)
+                    output_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
+                elif not on2:
+                    lora2 = path
+                    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                    lora2_wt = safe_float(wt)
+                    on2, label2, tag2, md2 = get_lora_info(lora2)
+                    output_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
+                elif not on3:
+                    lora3 = path
+                    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                    lora3_wt = safe_float(wt)
+                    on3, label3, tag3, md3 = get_lora_info(lora3)
+                    output_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
+                elif not on4:
+                    lora4 = path
+                    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                    lora4_wt = safe_float(wt)
+                    on4, label4, tag4, md4 = get_lora_info(lora4)
+                    output_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
+                elif not on5:
+                    lora5 = path
+                    lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                    lora5_wt = safe_float(wt)
+                    on5, label5, tag5, md5 = get_lora_info(lora5)
+                    output_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
+            elif p:
+                output_prompts.append(p)
+        lora_prompts = []
+        if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
+        if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
+        if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
+        if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
+        if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
+        output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
     choices = get_all_lora_tupled_list()
 
     return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
@@ -705,61 +674,34 @@ def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora
            gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
 
 
-def […]
-[… 27 more lines of the old function body (set_lora_trigger, per the removed import), not captured by the diff view …]
-            md = f'<img src="{items[4]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL]({items[3]})'
-        elif items[3]:
-            md = f'[LoRA Model URL]({items[3]})'
-    if tag and flag:
-        new_lora_model_list = get_lora_model_list()
-        return gr.update(value=tag, label=label, visible=True), gr.update(visible=True),\
-               gr.update(value=md, visible=True), gr.update(value=str(new_path), choices=get_lora_tupled_list(new_lora_model_list))
-    elif tag:
-        return gr.update(value=tag, label=label, visible=True), gr.update(visible=True),\
-               gr.update(value=md, visible=True), gr.update(value=str(new_path))
-    else:
-        return gr.update(value=value, label=label, visible=True), gr.update(visible=True),\
-               gr.update(value=md, visible=True), gr.update(visible=True)
-
-
-def apply_lora_prompt(prompt_gui: str, lora_trigger_gui: str):
-    if lora_trigger_gui == "None": return gr.update(value=prompt_gui)
-    tags = prompt_gui.split(",") if prompt_gui else []
-    prompts = normalize_prompt_list(tags)
-
-    lora_tag = lora_trigger_gui.replace("/",",")
-    lora_tags = lora_tag.split(",") if str(lora_trigger_gui) != "None" else []
-    lora_prompts = normalize_prompt_list(lora_tags)
-
-    empty = [""]
-    prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
-    return gr.update(value=prompt)
+def get_my_lora(link_url):
+    from pathlib import Path
+    before = get_local_model_list(directory_loras)
+    for url in [url.strip() for url in link_url.split(',')]:
+        if not Path(f"{directory_loras}/{url.split('/')[-1]}").exists():
+            download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
+    after = get_local_model_list(directory_loras)
+    new_files = list_sub(after, before)
+    for file in new_files:
+        path = Path(file)
+        if path.exists():
+            new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
+            path.resolve().rename(new_path.resolve())
+            update_lora_dict(str(new_path))
+    new_lora_model_list = get_lora_model_list()
+    new_lora_tupled_list = get_all_lora_tupled_list()
+
+    return gr.update(
+        choices=new_lora_tupled_list, value=new_lora_model_list[-1]
+    ), gr.update(
+        choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
+    ), gr.update(
+        choices=new_lora_tupled_list
+    )
 
 
 def upload_file_lora(files, progress=gr.Progress(track_tqdm=True)):
@@ -793,16 +735,57 @@ def move_file_lora(filepaths):
     )
 
 
-def search_lora_on_civitai(query: str, allow_model: list[str]):
+def get_civitai_info(path):
+    global civitai_not_exists_list
+    global loras_url_to_path_dict
     import requests
+    from requests.adapters import HTTPAdapter
     from urllib3.util import Retry
+    default = ["", "", "", "", ""]
+    if path in set(civitai_not_exists_list): return default
+    if not Path(path).exists(): return None
+    user_agent = get_user_agent()
+    headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
+    base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
+    params = {}
+    session = requests.Session()
+    retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
+    session.mount("https://", HTTPAdapter(max_retries=retries))
+    import hashlib
+    with open(path, 'rb') as file:
+        file_data = file.read()
+    hash_sha256 = hashlib.sha256(file_data).hexdigest()
+    url = base_url + hash_sha256
+    try:
+        r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
+    except Exception as e:
+        return default
+    else:
+        if not r.ok: return None
+        json = r.json()
+        if not 'baseModel' in json:
+            civitai_not_exists_list.append(path)
+            return default
+        items = []
+        items.append(" / ".join(json['trainedWords'])) # The words (prompts) used to trigger the model
+        items.append(json['baseModel']) # Base model (SDXL1.0, Pony, ...)
+        items.append(json['model']['name']) # The name of the model version
+        items.append(f"https://civitai.com/models/{json['modelId']}") # The repo url for the model
+        items.append(json['images'][0]['url']) # The url for a sample image
+        loras_url_to_path_dict[path] = json['downloadUrl'] # The download url to get the model file for this specific version
+        return items
+
+
+def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"], limit: int = 100):
+    import requests
     from requests.adapters import HTTPAdapter
+    from urllib3.util import Retry
     if not query: return None
     user_agent = get_user_agent()
     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
     base_url = 'https://civitai.com/api/v1/models'
     params = {'query': query, 'types': ['LORA'], 'sort': 'Highest Rated', 'period': 'AllTime',
-              'nsfw': 'true', 'supportsGeneration ': 'true'}
+              'nsfw': 'true', 'supportsGeneration ': 'true', 'limit': limit}
     session = requests.Session()
     retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
     session.mount("https://", HTTPAdapter(max_retries=retries))
@@ -810,26 +793,24 @@ def search_lora_on_civitai(query: str, allow_model: list[str]):
         r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(3.0, 30))
     except Exception as e:
         return None
-[… 19 lines of the old result handling, not captured by the diff view …]
-civitai_lora_last_results = {}
+    else:
+        if not r.ok: return None
+        json = r.json()
+        if not 'items' in json: return None
+        items = []
+        for j in json['items']:
+            for model in j['modelVersions']:
+                item = {}
+                if not model['baseModel'] in set(allow_model): continue
+                item['name'] = j['name']
+                item['creator'] = j['creator']['username']
+                item['tags'] = j['tags']
+                item['model_name'] = model['name']
+                item['base_model'] = model['baseModel']
+                item['dl_url'] = model['downloadUrl']
+                item['md'] = f'<img src="{model["images"][0]["url"]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL](https://civitai.com/models/{j["id"]})'
+                items.append(item)
+        return items
 
 
 def search_civitai_lora(query, base_model):
@@ -844,17 +825,19 @@ def search_civitai_lora(query, base_model):
         name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
         value = item['dl_url']
         choices.append((name, value))
-        civitai_lora_last_results[value] = item
+        civitai_lora_last_results[value] = item
     if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
        gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
-    […]
+    result = civitai_lora_last_results.get(choices[0][1], "None")
+    md = result['md'] if result else ""
     return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
           gr.update(visible=True), gr.update(visible=True)
 
 
 def select_civitai_lora(search_result):
     if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
-    […]
+    result = civitai_lora_last_results.get(search_result, "None")
+    md = result['md'] if result else ""
     return gr.update(value=search_result), gr.update(value=md, visible=True)
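Both `set_prompt_loras` and the rewritten `update_loras` recover LoRA selections from the prompt with the same `re.findall(r'<lora:(.+?):(.+?)>', p)` pattern and re-emit weights with `:.2f`. A standalone sketch of that parse-and-normalize round trip; `parse_lora_tags` is an illustrative name, not a helper from this repo:

```python
import re

def parse_lora_tags(prompt: str) -> list[tuple[str, float]]:
    """Collect (key, weight) pairs from <lora:key:weight> tags in a comma-separated prompt."""
    found = []
    for chunk in (prompt or "").split(","):
        for key, wt in re.findall(r'<lora:(.+?):(.+?)>', chunk.strip()):
            try:
                found.append((key, float(wt)))
            except ValueError:
                pass  # skip malformed weights, as safe_float() would
    return found

prompt = "1girl, <lora:detail_tweaker:0.8>, masterpiece, <lora:flat_color:1>"
for key, wt in parse_lora_tags(prompt):
    print(f"<lora:{key}:{wt:.2f}>")
# <lora:detail_tweaker:0.80>
# <lora:flat_color:1.00>
```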