Spaces: Running on Zero

Update utils.py #11
by John6666 - opened

- app.py +45 -63
- constants.py +18 -102
- requirements.txt +2 -4
- utils.py +2 -2
app.py
CHANGED
@@ -6,15 +6,12 @@ from stablepy import (
     SCHEDULE_PREDICTION_TYPE_OPTIONS,
     check_scheduler_compatibility,
     TASK_AND_PREPROCESSORS,
-    FACE_RESTORATION_MODELS,
-    scheduler_names,
 )
 from constants import (
     DIRECTORY_MODELS,
     DIRECTORY_LORAS,
     DIRECTORY_VAES,
     DIRECTORY_EMBEDS,
-    DIRECTORY_UPSCALERS,
     DOWNLOAD_MODEL,
     DOWNLOAD_VAE,
     DOWNLOAD_LORA,
@@ -38,12 +35,15 @@ from constants import (
     EXAMPLES_GUI,
     RESOURCES,
     DIFFUSERS_CONTROLNET_MODEL,
-    IP_MODELS,
-    MODE_IP_OPTIONS,
 )
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
 import torch
 import re
+from stablepy import (
+    scheduler_names,
+    IP_ADAPTERS_SD,
+    IP_ADAPTERS_SDXL,
+)
 import time
 from PIL import ImageFile
 from utils import (
@@ -70,15 +70,13 @@ import warnings
 from stablepy import logger
 from diffusers import FluxPipeline
 # import urllib.parse
-import subprocess
 
-subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 torch.backends.cuda.matmul.allow_tf32 = True
 # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
 print(os.getenv("SPACES_ZERO_GPU"))
 
-directories = [DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS…
+directories = [DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS]
 for directory in directories:
     os.makedirs(directory, exist_ok=True)
 
@@ -121,8 +119,8 @@ flux_pipe = FluxPipeline.from_pretrained(
     torch_dtype=torch.bfloat16,
 ).to("cuda")
 components = flux_pipe.components
+components.pop("transformer", None)
 delete_model(flux_repo)
-# components = None
 
 #######################
 # GUI
@@ -175,7 +173,6 @@ class GuiSD:
 dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
 
 if not os.path.exists(model_name):
-    print("debug", model_name, vae_model, task, controlnet_model)
     _ = download_diffuser_repo(
         repo_name=model_name,
         model_type=model_type,
@@ -201,7 +198,10 @@ class GuiSD:
 yield f"Loading model: {model_name}"
 
 if vae_model == "BakedVAE":
-    …
+    if not os.path.exists(model_name):
+        vae_model = model_name
+    else:
+        vae_model = None
 elif vae_model:
     vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
     if model_type != vae_type:
@@ -310,8 +310,8 @@ class GuiSD:
     syntax_weights,
     upscaler_model_path,
     upscaler_increases_size,
-    …
-    …
+    esrgan_tile,
+    esrgan_tile_overlap,
     hires_steps,
     hires_denoising_strength,
     hires_sampler,
@@ -375,9 +375,6 @@ class GuiSD:
     mode_ip2,
     scale_ip2,
     pag_scale,
-    face_restoration_model,
-    face_restoration_visibility,
-    face_restoration_weight,
 ):
     info_state = html_template_message("Navigating latent space...")
     yield info_state, gr.update(), gr.update()
@@ -416,20 +413,23 @@
 self.model.stream_config(concurrency=concurrency, latent_resize_by=1, vae_decoding=False)
 
 if task != "txt2img" and not image_control:
-    raise ValueError("…
+    raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
 
-if task …
-    raise ValueError("…
+if task == "inpaint" and not image_mask:
+    raise ValueError("No mask image found: Specify one in 'Image Mask'")
 
-if …
+if upscaler_model_path in UPSCALER_KEYS[:9]:
     upscaler_model = upscaler_model_path
 else:
+    directory_upscalers = 'upscalers'
+    os.makedirs(directory_upscalers, exist_ok=True)
+
     url_upscaler = UPSCALER_DICT_GUI[upscaler_model_path]
 
-    if not os.path.exists(f"./…
-        download_things(…
+    if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
+        download_things(directory_upscalers, url_upscaler, HF_TOKEN)
 
-    upscaler_model = f"./…
+    upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
 
 logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
 
@@ -531,8 +531,8 @@ class GuiSD:
     "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
     "upscaler_model_path": upscaler_model,
     "upscaler_increases_size": upscaler_increases_size,
-    "…
-    "…
+    "esrgan_tile": esrgan_tile,
+    "esrgan_tile_overlap": esrgan_tile_overlap,
     "hires_steps": hires_steps,
     "hires_denoising_strength": hires_denoising_strength,
     "hires_prompt": hires_prompt,
@@ -547,9 +547,6 @@ class GuiSD:
     "ip_adapter_model": params_ip_model,
     "ip_adapter_mode": params_ip_mode,
     "ip_adapter_scale": params_ip_scale,
-    "face_restoration_model": face_restoration_model,
-    "face_restoration_visibility": face_restoration_visibility,
-    "face_restoration_weight": face_restoration_weight,
 }
 
 # kwargs for diffusers pipeline
@@ -697,26 +694,22 @@ def sd_gen_generate_pipeline(*args):
 
 
 @spaces.GPU(duration=15)
-def …
+def esrgan_upscale(image, upscaler_name, upscaler_size):
     if image is None: return None
 
     from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
-    from stablepy import …
+    from stablepy import UpscalerESRGAN
 
-    image = image.convert("RGB")
     exif_image = extract_exif_data(image)
 
-    …
-    …
-    …
-    …
-    …
-        download_things(DIRECTORY_UPSCALERS, name_upscaler, HF_TOKEN)
+    url_upscaler = UPSCALER_DICT_GUI[upscaler_name]
+    directory_upscalers = 'upscalers'
+    os.makedirs(directory_upscalers, exist_ok=True)
+    if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
+        download_things(directory_upscalers, url_upscaler, HF_TOKEN)
 
-    …
-    …
-    scaler_beta = load_upscaler_model(model=name_upscaler, tile=0, tile_overlap=8, device="cuda", half=True)
-    image_up = scaler_beta.upscale(image, upscaler_size, True)
+    scaler_beta = UpscalerESRGAN(0, 0)
+    image_up = scaler_beta.upscale(image, upscaler_size, f"./upscalers/{url_upscaler.split('/')[-1]}")
 
     image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
 
@@ -907,8 +900,8 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
 
 upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
 upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
-…
-…
+esrgan_tile_gui = gr.Slider(minimum=0, value=0, maximum=500, step=1, label="ESRGAN Tile")
+esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=8, label="ESRGAN Tile Overlap")
 hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
 hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
 hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
@@ -957,16 +950,11 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
     [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, lora6_gui, lora7_gui, new_lora_status]
 )
 
-with gr.Accordion("Face restoration", open=False, visible=True):
-
-    face_rest_options = [None] + FACE_RESTORATION_MODELS
-
-    face_restoration_model_gui = gr.Dropdown(label="Face restoration model", choices=face_rest_options, value=face_rest_options[0])
-    face_restoration_visibility_gui = gr.Slider(minimum=0., maximum=1., step=0.001, value=1., label="Visibility")
-    face_restoration_weight_gui = gr.Slider(minimum=0., maximum=1., step=0.001, value=.5, label="Weight", info="(0 = maximum effect, 1 = minimum effect)")
-
 with gr.Accordion("IP-Adapter", open=False, visible=True):
 
+    IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
+    MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
+
     with gr.Accordion("IP-Adapter 1", open=False, visible=True):
         image_ip1 = gr.Image(label="IP Image", type="filepath")
         mask_ip1 = gr.Image(label="IP Mask", type="filepath")
@@ -985,13 +973,13 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
 image_mask_gui = gr.Image(label="Image Mask", type="filepath")
 strength_gui = gr.Slider(
     minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
-    info="This option adjusts the level of changes for img2img …
+    info="This option adjusts the level of changes for img2img and inpainting."
 )
 image_resolution_gui = gr.Slider(
     minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
     info="The maximum proportional size of the generated image based on the uploaded image."
 )
-controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0]…
+controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
 control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
 control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
 control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
@@ -1202,11 +1190,8 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
 
 with gr.Row():
     with gr.Column():
-
-        USCALER_TAB_KEYS = [name for name in UPSCALER_KEYS[9:]]
-
         image_up_tab = gr.Image(label="Image", type="pil", sources=["upload"])
-        upscaler_tab = gr.Dropdown(label="Upscaler", choices=…
+        upscaler_tab = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS[9:], value=UPSCALER_KEYS[11])
         upscaler_size_tab = gr.Slider(minimum=1., maximum=4., step=0.1, value=1.1, label="Upscale by")
         generate_button_up_tab = gr.Button(value="START UPSCALE", variant="primary")
 
@@ -1214,7 +1199,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
 result_up_tab = gr.Image(label="Result", type="pil", interactive=False, format="png")
 
 generate_button_up_tab.click(
-    fn=…
+    fn=esrgan_upscale,
     inputs=[image_up_tab, upscaler_tab, upscaler_size_tab],
     outputs=[result_up_tab],
 )
@@ -1286,8 +1271,8 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
     prompt_syntax_gui,
     upscaler_model_path_gui,
    upscaler_increases_size_gui,
-    …
-    …
+    esrgan_tile_gui,
+    esrgan_tile_overlap_gui,
     hires_steps_gui,
     hires_denoising_strength_gui,
     hires_sampler_gui,
@@ -1351,9 +1336,6 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
     mode_ip2,
     scale_ip2,
     pag_scale_gui,
-    face_restoration_model_gui,
-    face_restoration_visibility_gui,
-    face_restoration_weight_gui,
     load_lora_cpu_gui,
     verbose_info_gui,
     gpu_duration_gui,

(Ellipses mark lines that are truncated in the diff view and could not be recovered.)
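Note on the new upscaler path in app.py: the pieces above reduce to "map the dropdown name to a URL via UPSCALER_DICT_GUI, download the weights once into ./upscalers/, then hand the file to stablepy's UpscalerESRGAN". Below is a minimal sketch of that flow, assuming the stablepy commit pinned in requirements.txt; upscale_with_esrgan is an illustrative name, download_things and UPSCALER_DICT_GUI are this Space's own helpers, and the UpscalerESRGAN calls simply mirror the diff rather than a documented API.

import os
from PIL import Image
from stablepy import UpscalerESRGAN            # exposed by the pinned stablepy commit
from constants import UPSCALER_DICT_GUI        # upscaler name -> download URL (or built-in name)
from utils import download_things              # repo helper also used in app.py

def upscale_with_esrgan(image: Image.Image, upscaler_name: str, scale: float, hf_token: str = ""):
    url = UPSCALER_DICT_GUI[upscaler_name]                # e.g. the RealESRGAN_x4plus release URL
    weights = f"./upscalers/{url.split('/')[-1]}"         # same local path the GUI code builds
    os.makedirs("upscalers", exist_ok=True)
    if not os.path.exists(weights):
        download_things("upscalers", url, hf_token)       # download once, reuse afterwards
    scaler = UpscalerESRGAN(0, 0)                         # positional args copied from the diff
    return scaler.upscale(image, scale, weights)          # upscaled PIL image, as in esrgan_upscale()

# e.g. upscale_with_esrgan(Image.open("input.png"), "RealESRGAN_x4plus", 2.0)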
constants.py
CHANGED
@@ -4,9 +4,6 @@ from stablepy import (
     scheduler_names,
     SD15_TASKS,
     SDXL_TASKS,
-    ALL_BUILTIN_UPSCALERS,
-    IP_ADAPTERS_SD,
-    IP_ADAPTERS_SDXL,
 )
 
 # - **Download Models**
@@ -21,7 +18,6 @@ DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book
 LOAD_DIFFUSERS_FORMAT_MODEL = [
     'stabilityai/stable-diffusion-xl-base-1.0',
     'Laxhar/noobai-XL-1.1',
-    'Laxhar/noobai-XL-Vpred-1.0',
     'black-forest-labs/FLUX.1-dev',
     'John6666/blue-pencil-flux1-v021-fp8-flux',
     'John6666/wai-ani-flux-v10forfp8-fp8-flux',
@@ -33,8 +29,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'shauray/FluxDev-HyperSD-merged',
     'mikeyandfriends/PixelWave_FLUX.1-dev_03',
     'terminusresearch/FluxBooru-v0.3',
-    '…
-    # 'ostris/OpenFLUX.1',
+    'ostris/OpenFLUX.1',
     'shuttleai/shuttle-3-diffusion',
     'Laxhar/noobai-XL-1.0',
     'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
@@ -48,22 +43,12 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
-    'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
-    'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
-    'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
-    'John6666/mistoon-anime-v10illustrious-sdxl',
-    'John6666/hassaku-xl-illustrious-v10-sdxl',
-    'John6666/hassaku-xl-illustrious-v10style-sdxl',
     'John6666/haruki-mix-illustrious-v10-sdxl',
     'John6666/noobreal-v10-sdxl',
     'John6666/complicated-noobai-merge-vprediction-sdxl',
-    'Laxhar/noobai-XL-Vpred-0.9r',
-    'Laxhar/noobai-XL-Vpred-0.75s',
-    'Laxhar/noobai-XL-Vpred-0.75',
     'Laxhar/noobai-XL-Vpred-0.65s',
     'Laxhar/noobai-XL-Vpred-0.65',
     'Laxhar/noobai-XL-Vpred-0.6',
-    'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
     'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
     'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
     'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
@@ -73,7 +58,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/illustrious-pencil-xl-v200-sdxl',
     'John6666/obsession-illustriousxl-v21-sdxl',
     'John6666/obsession-illustriousxl-v30-sdxl',
-    'John6666/obsession-illustriousxl-v31-sdxl',
     'John6666/wai-nsfw-illustrious-v70-sdxl',
     'John6666/illustrious-pony-mix-v3-sdxl',
     'John6666/nova-anime-xl-illustriousv10-sdxl',
@@ -84,7 +68,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/meinaxl-v2-sdxl',
     'Eugeoter/artiwaifu-diffusion-2.0',
     'comin/IterComp',
-    'John6666/epicrealism-xl-vxiabeast-sdxl',
     'John6666/epicrealism-xl-v10kiss2-sdxl',
     'John6666/epicrealism-xl-v8kiss-sdxl',
     'misri/zavychromaxl_v80',
@@ -98,7 +81,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/ras-real-anime-screencap-v1-sdxl',
     'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
     'John6666/mistoon-anime-ponyalpha-sdxl',
-    'John6666/mistoon-xl-copper-v20fast-sdxl',
     'John6666/ebara-mfcg-pony-mix-v12-sdxl',
     'John6666/t-ponynai3-v51-sdxl',
     'John6666/t-ponynai3-v65-sdxl',
@@ -117,18 +99,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/cyberrealistic-pony-v63-sdxl',
     'John6666/cyberrealistic-pony-v64-sdxl',
     'John6666/cyberrealistic-pony-v65-sdxl',
-    'John6666/cyberrealistic-pony-v7-sdxl',
     'GraydientPlatformAPI/realcartoon-pony-diffusion',
     'John6666/nova-anime-xl-pony-v5-sdxl',
     'John6666/autismmix-sdxl-autismmix-pony-sdxl',
     'John6666/aimz-dream-real-pony-mix-v3-sdxl',
-    'John6666/prefectious-xl-nsfw-v10-sdxl',
-    'GraydientPlatformAPI/iniverseponyRealGuofeng49',
     'John6666/duchaiten-pony-real-v11fix-sdxl',
     'John6666/duchaiten-pony-real-v20-sdxl',
     'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
-    'Spestly/OdysseyXL-3.0',
-    'Spestly/OdysseyXL-4.0',
     'KBlueLeaf/Kohaku-XL-Zeta',
     'cagliostrolab/animagine-xl-3.1',
     'yodayo-ai/kivotos-xl-2.0',
@@ -143,9 +120,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'digiplay/darkphoenix3D_v1.1',
     'digiplay/BeenYouLiteL11_diffusers',
     'GraydientPlatformAPI/rev-animated2',
-    '…
-    'GraydientPlatformAPI/cyberreal6',
-    'GraydientPlatformAPI/cyberreal5',
+    'youknownothing/cyberrealistic_v50',
     'youknownothing/deliberate-v6',
     'GraydientPlatformAPI/deliberate-cyber3',
     'GraydientPlatformAPI/picx-real',
@@ -181,7 +156,6 @@ DIRECTORY_MODELS = 'models'
 DIRECTORY_LORAS = 'loras'
 DIRECTORY_VAES = 'vaes'
 DIRECTORY_EMBEDS = 'embedings'
-DIRECTORY_UPSCALERS = 'upscalers'
 
 CACHE_HF = "/home/user/.cache/huggingface/hub/"
 STORAGE_ROOT = "/home/user/"
@@ -210,21 +184,27 @@ TASK_STABLEPY = {
     'optical pattern ControlNet': 'pattern',
     'recolor ControlNet': 'recolor',
     'tile ControlNet': 'tile',
-    'repaint ControlNet': 'repaint',
 }
 
 TASK_MODEL_LIST = list(TASK_STABLEPY.keys())
 
 UPSCALER_DICT_GUI = {
     None: None,
-    …
-    …
+    "Lanczos": "Lanczos",
+    "Nearest": "Nearest",
+    'Latent': 'Latent',
+    'Latent (antialiased)': 'Latent (antialiased)',
+    'Latent (bicubic)': 'Latent (bicubic)',
+    'Latent (bicubic antialiased)': 'Latent (bicubic antialiased)',
+    'Latent (nearest)': 'Latent (nearest)',
+    'Latent (nearest-exact)': 'Latent (nearest-exact)',
+    "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
     "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
-    …
-    …
-    …
-    …
-    …
+    "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+    "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+    "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
+    "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
+    "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
     "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
     "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
     "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
@@ -239,7 +219,6 @@ UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
 DIFFUSERS_CONTROLNET_MODEL = [
     "Automatic",
 
-    "brad-twinkl/controlnet-union-sdxl-1.0-promax",
     "xinsir/controlnet-union-sdxl-1.0",
     "xinsir/anime-painter",
     "Eugeoter/noob-sdxl-controlnet-canny",
@@ -262,6 +241,7 @@ DIFFUSERS_CONTROLNET_MODEL = [
     "r3gm/controlnet-recolor-sdxl-fp16",
     "r3gm/controlnet-openpose-twins-sdxl-1.0-fp16",
     "r3gm/controlnet-qr-pattern-sdxl-fp16",
+    "brad-twinkl/controlnet-union-sdxl-1.0-promax",
     "Yakonrus/SDXL_Controlnet_Tile_Realistic_v2",
     "TheMistoAI/MistoLine",
     "briaai/BRIA-2.3-ControlNet-Recoloring",
@@ -340,20 +320,6 @@ POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
     name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
 ]
 
-IP_MODELS = []
-ALL_IPA = sorted(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL))
-
-for origin_name in ALL_IPA:
-    suffixes = []
-    if origin_name in IP_ADAPTERS_SD:
-        suffixes.append("sd1.5")
-    if origin_name in IP_ADAPTERS_SDXL:
-        suffixes.append("sdxl")
-    ref_name = f"{origin_name} ({'/'.join(suffixes)})"
-    IP_MODELS.append((ref_name, origin_name))
-
-MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
-
 SUBTITLE_GUI = (
     "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
     " to perform different tasks in image generation."
@@ -374,9 +340,7 @@ EXAMPLES_GUI_HELP = (
     3. ControlNet Canny SDXL
     4. Optical pattern (Optical illusion) SDXL
     5. Convert an image to a coloring drawing
-    6. …
-    7. V prediction model sd_embed variant inference
-    8. ControlNet OpenPose SD 1.5 and Latent upscale
+    6. ControlNet OpenPose SD 1.5 and Latent upscale
 
     - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
     """
@@ -503,54 +467,6 @@ EXAMPLES_GUI = [
         35,
         False,
     ],
-    [
-        "[mochizuki_shiina], [syuri22], newest, reimu, solo, outdoors, water, flower, lantern",
-        "worst quality, normal quality, old, sketch,",
-        28,
-        7.0,
-        -1,
-        "None",
-        0.33,
-        "DPM 3M Ef",
-        1600,
-        1024,
-        "Laxhar/noobai-XL-Vpred-1.0",
-        "txt2img",
-        "color_image.png",  # img conttol
-        1024,  # img resolution
-        0.35,  # strength
-        1.0,  # cn scale
-        0.0,  # cn start
-        1.0,  # cn end
-        "Classic",
-        None,
-        30,
-        False,
-    ],
-    [
-        "[mochizuki_shiina], [syuri22], newest, multiple girls, 2girls, earrings, jewelry, gloves, purple eyes, black hair, looking at viewer, nail polish, hat, smile, open mouth, fingerless gloves, sleeveless, :d, upper body, blue eyes, closed mouth, black gloves, hands up, long hair, shirt, bare shoulders, white headwear, blush, black headwear, blue nails, upper teeth only, short hair, white gloves, white shirt, teeth, rabbit hat, star earrings, purple nails, pink hair, detached sleeves, fingernails, fake animal ears, animal hat, sleeves past wrists, black shirt, medium hair, fur trim, sleeveless shirt, turtleneck, long sleeves, rabbit ears, star \\(symbol\\)",
-        "worst quality, normal quality, old, sketch,",
-        28,
-        7.0,
-        -1,
-        "None",
-        0.33,
-        "DPM 3M Ef",
-        1600,
-        1024,
-        "Laxhar/noobai-XL-Vpred-1.0",
-        "txt2img",
-        "color_image.png",  # img conttol
-        1024,  # img resolution
-        0.35,  # strength
-        1.0,  # cn scale
-        0.0,  # cn start
-        1.0,  # cn end
-        "Classic-sd_embed",
-        None,
-        30,
-        False,
-    ],
     [
         "1girl,face,curly hair,red hair,white background,",
         "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",

(Ellipses mark lines that are truncated in the diff view and could not be recovered.)
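The reordered UPSCALER_DICT_GUI above is what makes the UPSCALER_KEYS[:9] / UPSCALER_KEYS[9:] slicing in app.py line up: the first nine keys (None, Lanczos, Nearest and the six Latent modes) are handled natively by stablepy, while the remaining keys map to downloadable .pth weights. A small illustrative split, assuming the dict order shown in the diff (needs_download is a hypothetical helper, not part of the repo):

from constants import UPSCALER_DICT_GUI, UPSCALER_KEYS

built_in = UPSCALER_KEYS[:9]      # None, "Lanczos", "Nearest" and the six Latent variants
downloadable = UPSCALER_KEYS[9:]  # names whose values are .pth URLs (Real-ESRGAN family, 4x-UltraSharp, ...)

def needs_download(name):
    # app.py passes built-in names straight to stablepy; everything else is fetched first
    return name not in built_in and str(UPSCALER_DICT_GUI[name]).startswith("http")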
requirements.txt
CHANGED
@@ -1,7 +1,5 @@
-stablepy
+git+https://github.com/R3gm/stablepy.git@a9fe2dc # -b refactor_sampler_fix
 torch==2.2.0
 gdown
 opencv-python
-unidecode
-pydantic==2.10.6
-huggingface_hub==0.29.3
+unidecode
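The dependency change replaces the PyPI stablepy release with a pinned commit of the refactor_sampler_fix branch, which is where the names app.py now imports come from. A quick sanity check one could run inside the Space (sketch; the import list just echoes the diff):

import importlib.metadata

# Whatever stablepy resolves to after `pip install -r requirements.txt`
print(importlib.metadata.version("stablepy"))

# These imports only resolve on a stablepy build that still exposes them,
# which is what the pinned commit is expected to provide.
from stablepy import UpscalerESRGAN, scheduler_names, IP_ADAPTERS_SD, IP_ADAPTERS_SDXL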
utils.py
CHANGED
@@ -62,7 +62,7 @@ class ModelInformation:
         self.download_url = json_data.get("downloadUrl", "")
         self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
         self.filename_url = next(
-            (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "")…
+            (v.get("name", "") for v in reversed(json_data.get("files", [])) if str(self.model_version_id) in v.get("downloadUrl", "")), ""
         )
         self.filename_url = self.filename_url if self.filename_url else ""
         self.description = json_data.get("description", "")
@@ -300,7 +300,7 @@ def get_model_type(repo_id: str):
     default = "SD 1.5"
     try:
         if os.path.exists(repo_id):
-            tag …
+            tag = checkpoint_model_type(repo_id)
             return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
         else:
             model = api.model_info(repo_id=repo_id, timeout=5.0)

(Ellipses mark lines that are truncated in the diff view and could not be recovered.)
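For context on the ModelInformation change: giving next() a default keeps the lookup from raising StopIteration when none of the Civitai file entries match the version id, and reversed() prefers the last matching file when several do. A self-contained illustration with made-up data:

files = [
    {"name": "model-v1.safetensors", "downloadUrl": "https://civitai.com/api/download/models/111"},
    {"name": "model-v2.safetensors", "downloadUrl": "https://civitai.com/api/download/models/222"},
]
model_version_id = 333  # no file matches this id

filename = next(
    (f.get("name", "") for f in reversed(files) if str(model_version_id) in f.get("downloadUrl", "")),
    "",
)
print(repr(filename))  # -> '' instead of a StopIteration from a default-less next()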