Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -1,517 +1,1376 @@
|
|
|
|
1 |
import os
|
2 |
-
from stablepy
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
from stablepy import (
|
4 |
scheduler_names,
|
5 |
-
|
6 |
-
|
7 |
-
ALL_BUILTIN_UPSCALERS,
|
8 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
DOWNLOAD_VAE = "https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
|
15 |
-
|
16 |
-
# - **Download LoRAs**
|
17 |
-
DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
|
18 |
-
|
19 |
-
LOAD_DIFFUSERS_FORMAT_MODEL = [
|
20 |
-
'stabilityai/stable-diffusion-xl-base-1.0',
|
21 |
-
'Laxhar/noobai-XL-1.1',
|
22 |
-
'black-forest-labs/FLUX.1-dev',
|
23 |
-
'John6666/blue-pencil-flux1-v021-fp8-flux',
|
24 |
-
'John6666/wai-ani-flux-v10forfp8-fp8-flux',
|
25 |
-
'John6666/xe-anime-flux-v04-fp8-flux',
|
26 |
-
'John6666/lyh-anime-flux-v2a1-fp8-flux',
|
27 |
-
'John6666/carnival-unchained-v10-fp8-flux',
|
28 |
-
'John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux',
|
29 |
-
'Freepik/flux.1-lite-8B-alpha',
|
30 |
-
'shauray/FluxDev-HyperSD-merged',
|
31 |
-
'mikeyandfriends/PixelWave_FLUX.1-dev_03',
|
32 |
-
'terminusresearch/FluxBooru-v0.3',
|
33 |
-
'black-forest-labs/FLUX.1-schnell',
|
34 |
-
'ostris/OpenFLUX.1',
|
35 |
-
'shuttleai/shuttle-3-diffusion',
|
36 |
-
'Laxhar/noobai-XL-1.0',
|
37 |
-
'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
|
38 |
-
'Laxhar/noobai-XL-0.77',
|
39 |
-
'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
|
40 |
-
'Laxhar/noobai-XL-0.6',
|
41 |
-
'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
|
42 |
-
'John6666/noobai-cyberfix-v10-sdxl',
|
43 |
-
'John6666/noobaiiter-xl-vpred-v075-sdxl',
|
44 |
-
'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
|
45 |
-
'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
|
46 |
-
'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
|
47 |
-
'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
|
48 |
-
'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
|
49 |
-
'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
|
50 |
-
'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
|
51 |
-
'John6666/mistoon-anime-v10illustrious-sdxl',
|
52 |
-
'John6666/hassaku-xl-illustrious-v10-sdxl',
|
53 |
-
'John6666/hassaku-xl-illustrious-v10style-sdxl',
|
54 |
-
'John6666/haruki-mix-illustrious-v10-sdxl',
|
55 |
-
'John6666/noobreal-v10-sdxl',
|
56 |
-
'John6666/complicated-noobai-merge-vprediction-sdxl',
|
57 |
-
'Laxhar/noobai-XL-Vpred-0.9r',
|
58 |
-
'Laxhar/noobai-XL-Vpred-0.75s',
|
59 |
-
'Laxhar/noobai-XL-Vpred-0.75',
|
60 |
-
'Laxhar/noobai-XL-Vpred-0.65s',
|
61 |
-
'Laxhar/noobai-XL-Vpred-0.65',
|
62 |
-
'Laxhar/noobai-XL-Vpred-0.6',
|
63 |
-
'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
|
64 |
-
'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
|
65 |
-
'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
|
66 |
-
'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
|
67 |
-
'John6666/chadmix-noobai075-illustrious01-v10-sdxl',
|
68 |
-
'OnomaAIResearch/Illustrious-xl-early-release-v0',
|
69 |
-
'John6666/illustriousxl-mmmix-v50-sdxl',
|
70 |
-
'John6666/illustrious-pencil-xl-v200-sdxl',
|
71 |
-
'John6666/obsession-illustriousxl-v21-sdxl',
|
72 |
-
'John6666/obsession-illustriousxl-v30-sdxl',
|
73 |
-
'John6666/obsession-illustriousxl-v31-sdxl',
|
74 |
-
'John6666/wai-nsfw-illustrious-v70-sdxl',
|
75 |
-
'John6666/illustrious-pony-mix-v3-sdxl',
|
76 |
-
'John6666/nova-anime-xl-illustriousv10-sdxl',
|
77 |
-
'John6666/nova-orange-xl-v30-sdxl',
|
78 |
-
'John6666/silvermoon-mix03-illustrious-v10-sdxl',
|
79 |
-
'eienmojiki/Anything-XL',
|
80 |
-
'eienmojiki/Starry-XL-v5.2',
|
81 |
-
'John6666/meinaxl-v2-sdxl',
|
82 |
-
'Eugeoter/artiwaifu-diffusion-2.0',
|
83 |
-
'comin/IterComp',
|
84 |
-
'John6666/epicrealism-xl-vxiabeast-sdxl',
|
85 |
-
'John6666/epicrealism-xl-v10kiss2-sdxl',
|
86 |
-
'John6666/epicrealism-xl-v8kiss-sdxl',
|
87 |
-
'misri/zavychromaxl_v80',
|
88 |
-
'SG161222/RealVisXL_V4.0',
|
89 |
-
'SG161222/RealVisXL_V5.0',
|
90 |
-
'misri/newrealityxlAllInOne_Newreality40',
|
91 |
-
'gsdf/CounterfeitXL',
|
92 |
-
'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
|
93 |
-
'kitty7779/ponyDiffusionV6XL',
|
94 |
-
'GraydientPlatformAPI/aniverse-pony',
|
95 |
-
'John6666/ras-real-anime-screencap-v1-sdxl',
|
96 |
-
'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
|
97 |
-
'John6666/mistoon-anime-ponyalpha-sdxl',
|
98 |
-
'John6666/mistoon-xl-copper-v20fast-sdxl',
|
99 |
-
'John6666/ebara-mfcg-pony-mix-v12-sdxl',
|
100 |
-
'John6666/t-ponynai3-v51-sdxl',
|
101 |
-
'John6666/t-ponynai3-v65-sdxl',
|
102 |
-
'John6666/prefect-pony-xl-v3-sdxl',
|
103 |
-
'John6666/prefect-pony-xl-v4-sdxl',
|
104 |
-
'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
|
105 |
-
'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
|
106 |
-
'John6666/wai-real-mix-v11-sdxl',
|
107 |
-
'John6666/wai-shuffle-pdxl-v2-sdxl',
|
108 |
-
'John6666/wai-c-v6-sdxl',
|
109 |
-
'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
|
110 |
-
'John6666/sifw-annihilation-xl-v2-sdxl',
|
111 |
-
'John6666/photo-realistic-pony-v5-sdxl',
|
112 |
-
'John6666/pony-realism-v21main-sdxl',
|
113 |
-
'John6666/pony-realism-v22main-sdxl',
|
114 |
-
'John6666/cyberrealistic-pony-v63-sdxl',
|
115 |
-
'John6666/cyberrealistic-pony-v64-sdxl',
|
116 |
-
'John6666/cyberrealistic-pony-v65-sdxl',
|
117 |
-
'John6666/cyberrealistic-pony-v7-sdxl',
|
118 |
-
'GraydientPlatformAPI/realcartoon-pony-diffusion',
|
119 |
-
'John6666/nova-anime-xl-pony-v5-sdxl',
|
120 |
-
'John6666/autismmix-sdxl-autismmix-pony-sdxl',
|
121 |
-
'John6666/aimz-dream-real-pony-mix-v3-sdxl',
|
122 |
-
'John6666/prefectious-xl-nsfw-v10-sdxl',
|
123 |
-
'GraydientPlatformAPI/iniverseponyRealGuofeng49',
|
124 |
-
'John6666/duchaiten-pony-real-v11fix-sdxl',
|
125 |
-
'John6666/duchaiten-pony-real-v20-sdxl',
|
126 |
-
'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
|
127 |
-
'Spestly/OdysseyXL-3.0',
|
128 |
-
'KBlueLeaf/Kohaku-XL-Zeta',
|
129 |
-
'cagliostrolab/animagine-xl-3.1',
|
130 |
-
'yodayo-ai/kivotos-xl-2.0',
|
131 |
-
'yodayo-ai/holodayo-xl-2.1',
|
132 |
-
'yodayo-ai/clandestine-xl-1.0',
|
133 |
-
'digiplay/majicMIX_sombre_v2',
|
134 |
-
'digiplay/majicMIX_realistic_v6',
|
135 |
-
'digiplay/majicMIX_realistic_v7',
|
136 |
-
'digiplay/DreamShaper_8',
|
137 |
-
'digiplay/BeautifulArt_v1',
|
138 |
-
'digiplay/DarkSushi2.5D_v1',
|
139 |
-
'digiplay/darkphoenix3D_v1.1',
|
140 |
-
'digiplay/BeenYouLiteL11_diffusers',
|
141 |
-
'GraydientPlatformAPI/rev-animated2',
|
142 |
-
'myxlmynx/cyberrealistic_classic40',
|
143 |
-
'GraydientPlatformAPI/cyberreal6',
|
144 |
-
'GraydientPlatformAPI/cyberreal5',
|
145 |
-
'youknownothing/deliberate-v6',
|
146 |
-
'GraydientPlatformAPI/deliberate-cyber3',
|
147 |
-
'GraydientPlatformAPI/picx-real',
|
148 |
-
'GraydientPlatformAPI/perfectworld6',
|
149 |
-
'emilianJR/epiCRealism',
|
150 |
-
'votepurchase/counterfeitV30_v30',
|
151 |
-
'votepurchase/ChilloutMix',
|
152 |
-
'Meina/MeinaMix_V11',
|
153 |
-
'Meina/MeinaUnreal_V5',
|
154 |
-
'Meina/MeinaPastel_V7',
|
155 |
-
'GraydientPlatformAPI/realcartoon3d-17',
|
156 |
-
'GraydientPlatformAPI/realcartoon-pixar11',
|
157 |
-
'GraydientPlatformAPI/realcartoon-real17',
|
158 |
-
'nitrosocke/Ghibli-Diffusion',
|
159 |
-
]
|
160 |
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
"Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
|
165 |
-
]
|
166 |
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
]
|
|
|
|
|
|
|
|
|
|
|
172 |
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
DIRECTORY_LORAS = 'loras'
|
178 |
-
DIRECTORY_VAES = 'vaes'
|
179 |
-
DIRECTORY_EMBEDS = 'embedings'
|
180 |
-
DIRECTORY_UPSCALERS = 'upscalers'
|
181 |
-
|
182 |
-
CACHE_HF = "/home/user/.cache/huggingface/hub/"
|
183 |
-
STORAGE_ROOT = "/home/user/"
|
184 |
-
|
185 |
-
TASK_STABLEPY = {
|
186 |
-
'txt2img': 'txt2img',
|
187 |
-
'img2img': 'img2img',
|
188 |
-
'inpaint': 'inpaint',
|
189 |
-
# 'canny T2I Adapter': 'sdxl_canny_t2i', # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
|
190 |
-
# 'sketch T2I Adapter': 'sdxl_sketch_t2i',
|
191 |
-
# 'lineart T2I Adapter': 'sdxl_lineart_t2i',
|
192 |
-
# 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
|
193 |
-
# 'openpose T2I Adapter': 'sdxl_openpose_t2i',
|
194 |
-
'openpose ControlNet': 'openpose',
|
195 |
-
'canny ControlNet': 'canny',
|
196 |
-
'mlsd ControlNet': 'mlsd',
|
197 |
-
'scribble ControlNet': 'scribble',
|
198 |
-
'softedge ControlNet': 'softedge',
|
199 |
-
'segmentation ControlNet': 'segmentation',
|
200 |
-
'depth ControlNet': 'depth',
|
201 |
-
'normalbae ControlNet': 'normalbae',
|
202 |
-
'lineart ControlNet': 'lineart',
|
203 |
-
'lineart_anime ControlNet': 'lineart_anime',
|
204 |
-
'shuffle ControlNet': 'shuffle',
|
205 |
-
'ip2p ControlNet': 'ip2p',
|
206 |
-
'optical pattern ControlNet': 'pattern',
|
207 |
-
'recolor ControlNet': 'recolor',
|
208 |
-
'tile ControlNet': 'tile',
|
209 |
-
'repaint ControlNet': 'repaint',
|
210 |
-
}
|
211 |
-
|
212 |
-
TASK_MODEL_LIST = list(TASK_STABLEPY.keys())
|
213 |
-
|
214 |
-
UPSCALER_DICT_GUI = {
|
215 |
-
None: None,
|
216 |
-
**{bu: bu for bu in ALL_BUILTIN_UPSCALERS if bu not in ["HAT x4", "DAT x4", "DAT x3", "DAT x2", "SwinIR 4x"]},
|
217 |
-
# "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
|
218 |
-
"RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
|
219 |
-
# "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
|
220 |
-
# "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
|
221 |
-
# "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
|
222 |
-
# "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
|
223 |
-
# "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
|
224 |
-
"4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
|
225 |
-
"4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
|
226 |
-
"Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
|
227 |
-
"AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
|
228 |
-
"lollypop": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
|
229 |
-
"RealisticRescaler4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
|
230 |
-
"NickelbackFS4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
|
231 |
-
}
|
232 |
-
|
233 |
-
UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
|
234 |
-
|
235 |
-
DIFFUSERS_CONTROLNET_MODEL = [
|
236 |
-
"Automatic",
|
237 |
-
|
238 |
-
"brad-twinkl/controlnet-union-sdxl-1.0-promax",
|
239 |
-
"xinsir/controlnet-union-sdxl-1.0",
|
240 |
-
"xinsir/anime-painter",
|
241 |
-
"Eugeoter/noob-sdxl-controlnet-canny",
|
242 |
-
"Eugeoter/noob-sdxl-controlnet-lineart_anime",
|
243 |
-
"Eugeoter/noob-sdxl-controlnet-depth",
|
244 |
-
"Eugeoter/noob-sdxl-controlnet-normal",
|
245 |
-
"Eugeoter/noob-sdxl-controlnet-softedge_hed",
|
246 |
-
"Eugeoter/noob-sdxl-controlnet-scribble_pidinet",
|
247 |
-
"Eugeoter/noob-sdxl-controlnet-scribble_hed",
|
248 |
-
"Eugeoter/noob-sdxl-controlnet-manga_line",
|
249 |
-
"Eugeoter/noob-sdxl-controlnet-lineart_realistic",
|
250 |
-
"Eugeoter/noob-sdxl-controlnet-depth_midas-v1-1",
|
251 |
-
"dimitribarbot/controlnet-openpose-sdxl-1.0-safetensors",
|
252 |
-
"r3gm/controlnet-openpose-sdxl-1.0-fp16",
|
253 |
-
"r3gm/controlnet-canny-scribble-integrated-sdxl-v2-fp16",
|
254 |
-
"r3gm/controlnet-union-sdxl-1.0-fp16",
|
255 |
-
"r3gm/controlnet-lineart-anime-sdxl-fp16",
|
256 |
-
"r3gm/control_v1p_sdxl_qrcode_monster_fp16",
|
257 |
-
"r3gm/controlnet-tile-sdxl-1.0-fp16",
|
258 |
-
"r3gm/controlnet-recolor-sdxl-fp16",
|
259 |
-
"r3gm/controlnet-openpose-twins-sdxl-1.0-fp16",
|
260 |
-
"r3gm/controlnet-qr-pattern-sdxl-fp16",
|
261 |
-
"Yakonrus/SDXL_Controlnet_Tile_Realistic_v2",
|
262 |
-
"TheMistoAI/MistoLine",
|
263 |
-
"briaai/BRIA-2.3-ControlNet-Recoloring",
|
264 |
-
"briaai/BRIA-2.3-ControlNet-Canny",
|
265 |
-
|
266 |
-
"lllyasviel/control_v11p_sd15_openpose",
|
267 |
-
"lllyasviel/control_v11p_sd15_canny",
|
268 |
-
"lllyasviel/control_v11p_sd15_mlsd",
|
269 |
-
"lllyasviel/control_v11p_sd15_scribble",
|
270 |
-
"lllyasviel/control_v11p_sd15_softedge",
|
271 |
-
"lllyasviel/control_v11p_sd15_seg",
|
272 |
-
"lllyasviel/control_v11f1p_sd15_depth",
|
273 |
-
"lllyasviel/control_v11p_sd15_normalbae",
|
274 |
-
"lllyasviel/control_v11p_sd15_lineart",
|
275 |
-
"lllyasviel/control_v11p_sd15s2_lineart_anime",
|
276 |
-
"lllyasviel/control_v11e_sd15_shuffle",
|
277 |
-
"lllyasviel/control_v11e_sd15_ip2p",
|
278 |
-
"lllyasviel/control_v11p_sd15_inpaint",
|
279 |
-
"monster-labs/control_v1p_sd15_qrcode_monster",
|
280 |
-
"lllyasviel/control_v11f1e_sd15_tile",
|
281 |
-
"latentcat/control_v1p_sd15_brightness",
|
282 |
-
"yuanqiuye/qrcode_controlnet_v3",
|
283 |
-
|
284 |
-
"Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
|
285 |
-
# "Shakker-Labs/FLUX.1-dev-ControlNet-Pose",
|
286 |
-
# "Shakker-Labs/FLUX.1-dev-ControlNet-Depth",
|
287 |
-
# "jasperai/Flux.1-dev-Controlnet-Upscaler",
|
288 |
-
# "jasperai/Flux.1-dev-Controlnet-Depth",
|
289 |
-
# "jasperai/Flux.1-dev-Controlnet-Surface-Normals",
|
290 |
-
# "XLabs-AI/flux-controlnet-canny-diffusers",
|
291 |
-
# "XLabs-AI/flux-controlnet-hed-diffusers",
|
292 |
-
# "XLabs-AI/flux-controlnet-depth-diffusers",
|
293 |
-
# "InstantX/FLUX.1-dev-Controlnet-Union",
|
294 |
-
# "InstantX/FLUX.1-dev-Controlnet-Canny",
|
295 |
-
]
|
296 |
|
297 |
-
|
298 |
-
|
299 |
-
|
300 |
-
(
|
301 |
-
("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
|
302 |
-
("Classic-sd_embed format: (word:weight)", "Classic-sd_embed"),
|
303 |
-
("Classic-ignore", "Classic-ignore"),
|
304 |
-
("None", "None"),
|
305 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
306 |
|
307 |
-
|
308 |
-
"Use the right VAE for your model to maintain image quality. The wrong"
|
309 |
-
" VAE can lead to poor results, like blurriness in the generated images."
|
310 |
-
)
|
311 |
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
|
322 |
-
|
323 |
-
"diffusers:StableDiffusionPipeline": "SD 1.5",
|
324 |
-
"diffusers:StableDiffusionXLPipeline": "SDXL",
|
325 |
-
"diffusers:FluxPipeline": "FLUX",
|
326 |
-
}
|
327 |
-
|
328 |
-
DIFFUSECRAFT_CHECKPOINT_NAME = {
|
329 |
-
"sd1.5": "SD 1.5",
|
330 |
-
"sdxl": "SDXL",
|
331 |
-
"flux-dev": "FLUX",
|
332 |
-
"flux-schnell": "FLUX",
|
333 |
-
}
|
334 |
-
|
335 |
-
POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
|
336 |
-
name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
|
337 |
-
]
|
338 |
|
339 |
-
|
340 |
-
|
341 |
-
|
342 |
-
)
|
|
|
|
|
|
|
|
|
|
|
343 |
|
344 |
-
|
345 |
-
|
346 |
-
|
347 |
-
|
348 |
-
|
349 |
-
|
350 |
-
)
|
351 |
|
352 |
-
EXAMPLES_GUI_HELP = (
|
353 |
-
"""### The following examples perform specific tasks:
|
354 |
-
1. Generation with SDXL and upscale
|
355 |
-
2. Generation with FLUX dev
|
356 |
-
3. ControlNet Canny SDXL
|
357 |
-
4. Optical pattern (Optical illusion) SDXL
|
358 |
-
5. Convert an image to a coloring drawing
|
359 |
-
6. ControlNet OpenPose SD 1.5 and Latent upscale
|
360 |
-
|
361 |
-
- Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
|
362 |
-
"""
|
363 |
-
)
|
364 |
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
|
399 |
-
|
400 |
-
|
401 |
-
|
402 |
-
|
403 |
-
|
404 |
-
|
405 |
-
|
406 |
-
|
407 |
-
|
408 |
-
|
409 |
-
|
410 |
-
|
411 |
-
|
412 |
-
|
413 |
-
|
414 |
-
|
415 |
-
|
416 |
-
|
417 |
-
|
418 |
-
|
419 |
-
|
420 |
-
"
|
421 |
-
|
422 |
-
|
423 |
-
|
424 |
-
|
425 |
-
|
426 |
-
|
427 |
-
|
428 |
-
|
429 |
-
|
430 |
-
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
435 |
-
|
436 |
-
|
437 |
-
|
438 |
-
|
439 |
-
|
440 |
-
|
441 |
-
|
442 |
-
|
443 |
-
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
449 |
-
|
450 |
-
|
451 |
-
|
452 |
-
|
453 |
-
|
454 |
-
|
455 |
-
|
456 |
-
|
457 |
-
|
458 |
-
|
459 |
-
|
460 |
-
|
461 |
-
|
462 |
-
|
463 |
-
|
464 |
-
|
465 |
-
|
466 |
-
|
467 |
-
|
468 |
-
|
469 |
-
|
470 |
-
|
471 |
-
|
472 |
-
|
473 |
-
|
474 |
-
|
475 |
-
|
476 |
-
|
477 |
-
|
478 |
-
|
479 |
-
|
480 |
-
|
481 |
-
|
482 |
-
|
483 |
-
|
484 |
-
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
|
489 |
-
|
490 |
-
|
491 |
-
|
492 |
-
|
493 |
-
|
494 |
-
|
495 |
-
|
496 |
-
|
497 |
-
|
498 |
-
|
499 |
-
|
500 |
-
|
501 |
-
|
502 |
-
|
503 |
-
|
504 |
-
|
505 |
-
|
506 |
-
|
507 |
-
|
508 |
-
|
509 |
-
|
510 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
511 |
|
512 |
-
|
513 |
-
|
514 |
-
|
515 |
-
|
516 |
-
"""
|
517 |
)
|
|
|
1 |
+
import spaces
|
2 |
import os
|
3 |
+
from stablepy import (
|
4 |
+
Model_Diffusers,
|
5 |
+
SCHEDULE_TYPE_OPTIONS,
|
6 |
+
SCHEDULE_PREDICTION_TYPE_OPTIONS,
|
7 |
+
check_scheduler_compatibility,
|
8 |
+
TASK_AND_PREPROCESSORS,
|
9 |
+
FACE_RESTORATION_MODELS,
|
10 |
+
)
|
11 |
+
from constants import (
|
12 |
+
DIRECTORY_MODELS,
|
13 |
+
DIRECTORY_LORAS,
|
14 |
+
DIRECTORY_VAES,
|
15 |
+
DIRECTORY_EMBEDS,
|
16 |
+
DIRECTORY_UPSCALERS,
|
17 |
+
DOWNLOAD_MODEL,
|
18 |
+
DOWNLOAD_VAE,
|
19 |
+
DOWNLOAD_LORA,
|
20 |
+
LOAD_DIFFUSERS_FORMAT_MODEL,
|
21 |
+
DIFFUSERS_FORMAT_LORAS,
|
22 |
+
DOWNLOAD_EMBEDS,
|
23 |
+
CIVITAI_API_KEY,
|
24 |
+
HF_TOKEN,
|
25 |
+
TASK_STABLEPY,
|
26 |
+
TASK_MODEL_LIST,
|
27 |
+
UPSCALER_DICT_GUI,
|
28 |
+
UPSCALER_KEYS,
|
29 |
+
PROMPT_W_OPTIONS,
|
30 |
+
WARNING_MSG_VAE,
|
31 |
+
SDXL_TASK,
|
32 |
+
MODEL_TYPE_TASK,
|
33 |
+
POST_PROCESSING_SAMPLER,
|
34 |
+
SUBTITLE_GUI,
|
35 |
+
HELP_GUI,
|
36 |
+
EXAMPLES_GUI_HELP,
|
37 |
+
EXAMPLES_GUI,
|
38 |
+
RESOURCES,
|
39 |
+
DIFFUSERS_CONTROLNET_MODEL,
|
40 |
+
)
|
41 |
+
from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
|
42 |
+
import torch
|
43 |
+
import re
|
44 |
from stablepy import (
|
45 |
scheduler_names,
|
46 |
+
IP_ADAPTERS_SD,
|
47 |
+
IP_ADAPTERS_SDXL,
|
|
|
48 |
)
|
49 |
+
import time
|
50 |
+
from PIL import ImageFile
|
51 |
+
from utils import (
|
52 |
+
download_things,
|
53 |
+
get_model_list,
|
54 |
+
extract_parameters,
|
55 |
+
get_my_lora,
|
56 |
+
get_model_type,
|
57 |
+
extract_exif_data,
|
58 |
+
create_mask_now,
|
59 |
+
download_diffuser_repo,
|
60 |
+
get_used_storage_gb,
|
61 |
+
delete_model,
|
62 |
+
progress_step_bar,
|
63 |
+
html_template_message,
|
64 |
+
escape_html,
|
65 |
+
)
|
66 |
+
from image_processor import preprocessor_tab
|
67 |
+
from datetime import datetime
|
68 |
+
import gradio as gr
|
69 |
+
import logging
|
70 |
+
import diffusers
|
71 |
+
import warnings
|
72 |
+
from stablepy import logger
|
73 |
+
from diffusers import FluxPipeline
|
74 |
+
# import urllib.parse
|
75 |
|
76 |
+
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
77 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
78 |
+
# os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
|
79 |
+
print(os.getenv("SPACES_ZERO_GPU"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
80 |
|
81 |
+
directories = [DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS, DIRECTORY_UPSCALERS]
|
82 |
+
for directory in directories:
|
83 |
+
os.makedirs(directory, exist_ok=True)
|
|
|
|
|
84 |
|
85 |
+
# Download stuffs
|
86 |
+
for url in [url.strip() for url in DOWNLOAD_MODEL.split(',')]:
|
87 |
+
if not os.path.exists(f"./models/{url.split('/')[-1]}"):
|
88 |
+
download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
|
89 |
+
for url in [url.strip() for url in DOWNLOAD_VAE.split(',')]:
|
90 |
+
if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
|
91 |
+
download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
|
92 |
+
for url in [url.strip() for url in DOWNLOAD_LORA.split(',')]:
|
93 |
+
if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
|
94 |
+
download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
|
95 |
|
96 |
+
# Download Embeddings
|
97 |
+
for url_embed in DOWNLOAD_EMBEDS:
|
98 |
+
if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
|
99 |
+
download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
|
101 |
+
# Build list models
|
102 |
+
embed_list = get_model_list(DIRECTORY_EMBEDS)
|
103 |
+
embed_list = [
|
104 |
+
(os.path.splitext(os.path.basename(emb))[0], emb) for emb in embed_list
|
|
|
|
|
|
|
|
|
105 |
]
|
106 |
+
single_file_model_list = get_model_list(DIRECTORY_MODELS)
|
107 |
+
model_list = LOAD_DIFFUSERS_FORMAT_MODEL + single_file_model_list
|
108 |
+
lora_model_list = get_model_list(DIRECTORY_LORAS)
|
109 |
+
lora_model_list.insert(0, "None")
|
110 |
+
lora_model_list = lora_model_list + DIFFUSERS_FORMAT_LORAS
|
111 |
+
vae_model_list = get_model_list(DIRECTORY_VAES)
|
112 |
+
vae_model_list.insert(0, "BakedVAE")
|
113 |
+
vae_model_list.insert(0, "None")
|
114 |
|
115 |
+
print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
|
|
|
|
|
|
|
116 |
|
117 |
+
flux_repo = "camenduru/FLUX.1-dev-diffusers"
|
118 |
+
flux_pipe = FluxPipeline.from_pretrained(
|
119 |
+
flux_repo,
|
120 |
+
transformer=None,
|
121 |
+
torch_dtype=torch.bfloat16,
|
122 |
+
).to("cuda")
|
123 |
+
components = flux_pipe.components
|
124 |
+
components.pop("transformer", None)
|
125 |
+
components.pop("scheduler", None)
|
126 |
+
delete_model(flux_repo)
|
127 |
+
# components = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
128 |
|
129 |
+
#######################
|
130 |
+
# GUI
|
131 |
+
#######################
|
132 |
+
logging.getLogger("diffusers").setLevel(logging.ERROR)
|
133 |
+
diffusers.utils.logging.set_verbosity(40)
|
134 |
+
warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
|
135 |
+
warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
|
136 |
+
warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
|
137 |
+
logger.setLevel(logging.DEBUG)
|
138 |
|
139 |
+
CSS = """
|
140 |
+
.contain { display: flex; flex-direction: column; }
|
141 |
+
#component-0 { height: 100%; }
|
142 |
+
#gallery { flex-grow: 1; }
|
143 |
+
#load_model { height: 50px; }
|
144 |
+
"""
|
|
|
145 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
146 |
|
147 |
+
class GuiSD:
|
148 |
+
def __init__(self, stream=True):
|
149 |
+
self.model = None
|
150 |
+
self.status_loading = False
|
151 |
+
self.sleep_loading = 4
|
152 |
+
self.last_load = datetime.now()
|
153 |
+
self.inventory = []
|
154 |
+
|
155 |
+
def update_storage_models(self, storage_floor_gb=24, required_inventory_for_purge=3):
|
156 |
+
while get_used_storage_gb() > storage_floor_gb:
|
157 |
+
if len(self.inventory) < required_inventory_for_purge:
|
158 |
+
break
|
159 |
+
removal_candidate = self.inventory.pop(0)
|
160 |
+
delete_model(removal_candidate)
|
161 |
+
|
162 |
+
def update_inventory(self, model_name):
|
163 |
+
if model_name not in single_file_model_list:
|
164 |
+
self.inventory = [
|
165 |
+
m for m in self.inventory if m != model_name
|
166 |
+
] + [model_name]
|
167 |
+
print(self.inventory)
|
168 |
+
|
169 |
+
def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
|
170 |
+
|
171 |
+
# download link model > model_name
|
172 |
+
|
173 |
+
self.update_storage_models()
|
174 |
+
|
175 |
+
vae_model = vae_model if vae_model != "None" else None
|
176 |
+
model_type = get_model_type(model_name)
|
177 |
+
dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
|
178 |
+
|
179 |
+
if not os.path.exists(model_name):
|
180 |
+
_ = download_diffuser_repo(
|
181 |
+
repo_name=model_name,
|
182 |
+
model_type=model_type,
|
183 |
+
revision="main",
|
184 |
+
token=True,
|
185 |
+
)
|
186 |
+
|
187 |
+
self.update_inventory(model_name)
|
188 |
+
|
189 |
+
for i in range(68):
|
190 |
+
if not self.status_loading:
|
191 |
+
self.status_loading = True
|
192 |
+
if i > 0:
|
193 |
+
time.sleep(self.sleep_loading)
|
194 |
+
print("Previous model ops...")
|
195 |
+
break
|
196 |
+
time.sleep(0.5)
|
197 |
+
print(f"Waiting queue {i}")
|
198 |
+
yield "Waiting queue"
|
199 |
+
|
200 |
+
self.status_loading = True
|
201 |
+
|
202 |
+
yield f"Loading model: {model_name}"
|
203 |
+
|
204 |
+
if vae_model == "BakedVAE":
|
205 |
+
vae_model = model_name
|
206 |
+
elif vae_model:
|
207 |
+
vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
|
208 |
+
if model_type != vae_type:
|
209 |
+
gr.Warning(WARNING_MSG_VAE)
|
210 |
+
|
211 |
+
print("Loading model...")
|
212 |
+
|
213 |
+
try:
|
214 |
+
start_time = time.time()
|
215 |
+
|
216 |
+
if self.model is None:
|
217 |
+
self.model = Model_Diffusers(
|
218 |
+
base_model_id=model_name,
|
219 |
+
task_name=TASK_STABLEPY[task],
|
220 |
+
vae_model=vae_model,
|
221 |
+
type_model_precision=dtype_model,
|
222 |
+
retain_task_model_in_cache=False,
|
223 |
+
controlnet_model=controlnet_model,
|
224 |
+
device="cpu",
|
225 |
+
env_components=components,
|
226 |
+
)
|
227 |
+
self.model.advanced_params(image_preprocessor_cuda_active=True)
|
228 |
+
else:
|
229 |
+
if self.model.base_model_id != model_name:
|
230 |
+
load_now_time = datetime.now()
|
231 |
+
elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)
|
232 |
+
|
233 |
+
if elapsed_time <= 9:
|
234 |
+
print("Waiting for the previous model's time ops...")
|
235 |
+
time.sleep(9 - elapsed_time)
|
236 |
+
|
237 |
+
self.model.device = torch.device("cpu")
|
238 |
+
self.model.load_pipe(
|
239 |
+
model_name,
|
240 |
+
task_name=TASK_STABLEPY[task],
|
241 |
+
vae_model=vae_model,
|
242 |
+
type_model_precision=dtype_model,
|
243 |
+
retain_task_model_in_cache=False,
|
244 |
+
controlnet_model=controlnet_model,
|
245 |
+
)
|
246 |
+
|
247 |
+
end_time = time.time()
|
248 |
+
self.sleep_loading = max(min(int(end_time - start_time), 10), 4)
|
249 |
+
except Exception as e:
|
250 |
+
self.last_load = datetime.now()
|
251 |
+
self.status_loading = False
|
252 |
+
self.sleep_loading = 4
|
253 |
+
raise e
|
254 |
+
|
255 |
+
self.last_load = datetime.now()
|
256 |
+
self.status_loading = False
|
257 |
+
|
258 |
+
yield f"Model loaded: {model_name}"
|
259 |
+
|
260 |
+
    # @spaces.GPU(duration=59)
    @torch.inference_mode()
    def generate_pipeline(
        self,
        prompt,
        neg_prompt,
        num_images,
        steps,
        cfg,
        clip_skip,
        seed,
        lora1,
        lora_scale1,
        lora2,
        lora_scale2,
        lora3,
        lora_scale3,
        lora4,
        lora_scale4,
        lora5,
        lora_scale5,
        lora6,
        lora_scale6,
        lora7,
        lora_scale7,
        sampler,
        schedule_type,
        schedule_prediction_type,
        img_height,
        img_width,
        model_name,
        vae_model,
        task,
        image_control,
        preprocessor_name,
        preprocess_resolution,
        image_resolution,
        style_prompt,  # list []
        style_json_file,
        image_mask,
        strength,
        low_threshold,
        high_threshold,
        value_threshold,
        distance_threshold,
        recolor_gamma_correction,
        tile_blur_sigma,
        controlnet_output_scaling_in_unet,
        controlnet_start_threshold,
        controlnet_stop_threshold,
        textual_inversion,
        syntax_weights,
        upscaler_model_path,
        upscaler_increases_size,
        upscaler_tile_size,
        upscaler_tile_overlap,
        hires_steps,
        hires_denoising_strength,
        hires_sampler,
        hires_prompt,
        hires_negative_prompt,
        hires_before_adetailer,
        hires_after_adetailer,
        hires_schedule_type,
        hires_guidance_scale,
        controlnet_model,
        loop_generation,
        leave_progress_bar,
        disable_progress_bar,
        image_previews,
        display_images,
        save_generated_images,
        filename_pattern,
        image_storage_location,
        retain_compel_previous_load,
        retain_detailfix_model_previous_load,
        retain_hires_model_previous_load,
        t2i_adapter_preprocessor,
        t2i_adapter_conditioning_scale,
        t2i_adapter_conditioning_factor,
        xformers_memory_efficient_attention,
        freeu,
        generator_in_cpu,
        adetailer_inpaint_only,
        adetailer_verbose,
        adetailer_sampler,
        adetailer_active_a,
        prompt_ad_a,
        negative_prompt_ad_a,
        strength_ad_a,
        face_detector_ad_a,
        person_detector_ad_a,
        hand_detector_ad_a,
        mask_dilation_a,
        mask_blur_a,
        mask_padding_a,
        adetailer_active_b,
        prompt_ad_b,
        negative_prompt_ad_b,
        strength_ad_b,
        face_detector_ad_b,
        person_detector_ad_b,
        hand_detector_ad_b,
        mask_dilation_b,
        mask_blur_b,
        mask_padding_b,
        retain_task_cache_gui,
        guidance_rescale,
        image_ip1,
        mask_ip1,
        model_ip1,
        mode_ip1,
        scale_ip1,
        image_ip2,
        mask_ip2,
        model_ip2,
        mode_ip2,
        scale_ip2,
        pag_scale,
        face_restoration_model,
        face_restoration_visibility,
        face_restoration_weight,
    ):
        """Validate inputs, assemble stablepy kwargs, and stream generation.

        Generator used as a Gradio callback: yields ``(status_html, images,
        info_html)`` tuples — first an initial status, then incremental
        progress while ``self.model`` streams results, and finally a
        "COMPLETE" state with seeds, metadata and download links.

        Notes:
        - ``style_json_file``, ``controlnet_model`` and ``retain_task_cache_gui``
          are accepted to keep the UI wiring signature stable but are not used
          in this body (an empty string is always passed for the style JSON).
        - Assumes ``self.model`` has already been loaded by the model-loading
          step that runs before this callback — NOTE(review): confirm ordering
          against the UI event chain.
        """
        info_state = html_template_message("Navigating latent space...")
        yield info_state, gr.update(), gr.update()

        # "None" (string) from the dropdown means no external VAE.
        vae_model = vae_model if vae_model != "None" else None
        loras_list = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
        vae_msg = f"VAE: {vae_model}" if vae_model else ""
        msg_lora = ""

        print("Config model:", model_name, vae_model, loras_list)

        # Translate the UI task label to stablepy's internal task name.
        task = TASK_STABLEPY[task]

        params_ip_img = []
        params_ip_msk = []
        params_ip_model = []
        params_ip_mode = []
        params_ip_scale = []

        all_adapters = [
            (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
            (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
        ]

        # IP-Adapter settings only apply to UNet pipelines; pipelines with a
        # `transformer` attribute (e.g. FLUX-style) skip them entirely.
        if not hasattr(self.model.pipe, "transformer"):
            for imgip, mskip, modelip, modeip, scaleip in all_adapters:
                if imgip:
                    params_ip_img.append(imgip)
                    if mskip:
                        params_ip_msk.append(mskip)
                    params_ip_model.append(modelip)
                    params_ip_mode.append(modeip)
                    params_ip_scale.append(scaleip)

        # Stream partial results every `concurrency` steps without VAE decode.
        concurrency = 5
        self.model.stream_config(concurrency=concurrency, latent_resize_by=1, vae_decoding=False)

        if task != "txt2img" and not image_control:
            raise ValueError("Reference image is required. Please upload one in 'Image ControlNet/Inpaint/Img2img'.")

        if task in ["inpaint", "repaint"] and not image_mask:
            raise ValueError("Mask image not found. Upload one in 'Image Mask' to proceed.")

        # Upscaler entries that map to a URL are downloaded once, then used
        # via their local path; others are passed through by name.
        if "https://" not in str(UPSCALER_DICT_GUI[upscaler_model_path]):
            upscaler_model = upscaler_model_path
        else:
            url_upscaler = UPSCALER_DICT_GUI[upscaler_model_path]

            if not os.path.exists(f"./{DIRECTORY_UPSCALERS}/{url_upscaler.split('/')[-1]}"):
                download_things(DIRECTORY_UPSCALERS, url_upscaler, HF_TOKEN)

            upscaler_model = f"./{DIRECTORY_UPSCALERS}/{url_upscaler.split('/')[-1]}"

        # Silence the detector library unless verbose adetailer was requested.
        logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)

        adetailer_params_A = {
            "face_detector_ad": face_detector_ad_a,
            "person_detector_ad": person_detector_ad_a,
            "hand_detector_ad": hand_detector_ad_a,
            "prompt": prompt_ad_a,
            "negative_prompt": negative_prompt_ad_a,
            "strength": strength_ad_a,
            # "image_list_task" : None,
            "mask_dilation": mask_dilation_a,
            "mask_blur": mask_blur_a,
            "mask_padding": mask_padding_a,
            "inpaint_only": adetailer_inpaint_only,
            "sampler": adetailer_sampler,
        }

        adetailer_params_B = {
            "face_detector_ad": face_detector_ad_b,
            "person_detector_ad": person_detector_ad_b,
            "hand_detector_ad": hand_detector_ad_b,
            "prompt": prompt_ad_b,
            "negative_prompt": negative_prompt_ad_b,
            "strength": strength_ad_b,
            # "image_list_task" : None,
            "mask_dilation": mask_dilation_b,
            "mask_blur": mask_blur_b,
            "mask_padding": mask_padding_b,
        }
        pipe_params = {
            "prompt": prompt,
            "negative_prompt": neg_prompt,
            "img_height": img_height,
            "img_width": img_width,
            "num_images": num_images,
            "num_steps": steps,
            "guidance_scale": cfg,
            "clip_skip": clip_skip,
            "pag_scale": float(pag_scale),
            "seed": seed,
            "image": image_control,
            "preprocessor_name": preprocessor_name,
            "preprocess_resolution": preprocess_resolution,
            "image_resolution": image_resolution,
            "style_prompt": style_prompt if style_prompt else "",
            "style_json_file": "",
            "image_mask": image_mask,  # only for Inpaint
            "strength": strength,  # only for Inpaint or ...
            "low_threshold": low_threshold,
            "high_threshold": high_threshold,
            "value_threshold": value_threshold,
            "distance_threshold": distance_threshold,
            "recolor_gamma_correction": float(recolor_gamma_correction),
            "tile_blur_sigma": int(tile_blur_sigma),
            "lora_A": lora1 if lora1 != "None" else None,
            "lora_scale_A": lora_scale1,
            "lora_B": lora2 if lora2 != "None" else None,
            "lora_scale_B": lora_scale2,
            "lora_C": lora3 if lora3 != "None" else None,
            "lora_scale_C": lora_scale3,
            "lora_D": lora4 if lora4 != "None" else None,
            "lora_scale_D": lora_scale4,
            "lora_E": lora5 if lora5 != "None" else None,
            "lora_scale_E": lora_scale5,
            "lora_F": lora6 if lora6 != "None" else None,
            "lora_scale_F": lora_scale6,
            "lora_G": lora7 if lora7 != "None" else None,
            "lora_scale_G": lora_scale7,
            "textual_inversion": embed_list if textual_inversion else [],
            "syntax_weights": syntax_weights,  # "Classic"
            "sampler": sampler,
            "schedule_type": schedule_type,
            "schedule_prediction_type": schedule_prediction_type,
            "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
            "gui_active": True,
            "loop_generation": loop_generation,
            "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
            "control_guidance_start": float(controlnet_start_threshold),
            "control_guidance_end": float(controlnet_stop_threshold),
            "generator_in_cpu": generator_in_cpu,
            "FreeU": freeu,
            "adetailer_A": adetailer_active_a,
            "adetailer_A_params": adetailer_params_A,
            "adetailer_B": adetailer_active_b,
            "adetailer_B_params": adetailer_params_B,
            "leave_progress_bar": leave_progress_bar,
            "disable_progress_bar": disable_progress_bar,
            "image_previews": image_previews,
            "display_images": display_images,
            "save_generated_images": save_generated_images,
            "filename_pattern": filename_pattern,
            "image_storage_location": image_storage_location,
            "retain_compel_previous_load": retain_compel_previous_load,
            "retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
            "retain_hires_model_previous_load": retain_hires_model_previous_load,
            "t2i_adapter_preprocessor": t2i_adapter_preprocessor,
            "t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
            "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
            "upscaler_model_path": upscaler_model,
            "upscaler_increases_size": upscaler_increases_size,
            "upscaler_tile_size": upscaler_tile_size,
            "upscaler_tile_overlap": upscaler_tile_overlap,
            "hires_steps": hires_steps,
            "hires_denoising_strength": hires_denoising_strength,
            "hires_prompt": hires_prompt,
            "hires_negative_prompt": hires_negative_prompt,
            "hires_sampler": hires_sampler,
            "hires_before_adetailer": hires_before_adetailer,
            "hires_after_adetailer": hires_after_adetailer,
            "hires_schedule_type": hires_schedule_type,
            "hires_guidance_scale": hires_guidance_scale,
            "ip_adapter_image": params_ip_img,
            "ip_adapter_mask": params_ip_msk,
            "ip_adapter_model": params_ip_model,
            "ip_adapter_mode": params_ip_mode,
            "ip_adapter_scale": params_ip_scale,
            "face_restoration_model": face_restoration_model,
            "face_restoration_visibility": face_restoration_visibility,
            "face_restoration_weight": face_restoration_weight,
        }

        # kwargs for diffusers pipeline
        if guidance_rescale:
            pipe_params["guidance_rescale"] = guidance_rescale

        self.model.device = torch.device("cuda:0")
        # Transformer-based pipelines only need the transformer on GPU when
        # LoRAs are actually selected.
        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
            self.model.pipe.transformer.to(self.model.device)
            print("transformer to cuda")

        actual_progress = 0
        info_images = gr.update()
        # NOTE: the loop rebinds `seed` (parameter) with the seeds actually used.
        for img, [seed, image_path, metadata] in self.model(**pipe_params):
            info_state = progress_step_bar(actual_progress, steps)
            # The model streams one update per `concurrency` steps.
            actual_progress += concurrency
            if image_path:
                info_images = f"Seeds: {str(seed)}"
                if vae_msg:
                    info_images = info_images + "<br>" + vae_msg

                if "Cannot copy out of meta tensor; no data!" in self.model.last_lora_error:
                    msg_ram = "Unable to process the LoRAs due to high RAM usage; please try again later."
                    print(msg_ram)
                    msg_lora += f"<br>{msg_ram}"

                for status, lora in zip(self.model.lora_status, self.model.lora_memory):
                    if status:
                        msg_lora += f"<br>Loaded: {lora}"
                    elif status is not None:
                        msg_lora += f"<br>Error with: {lora}"

                if msg_lora:
                    info_images += msg_lora

                info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[-1]) + "<br>-------<br>"

                # Rewrite gallery paths into Gradio-servable file URLs.
                download_links = "<br>".join(
                    [
                        f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
                        for i, path in enumerate(image_path)
                    ]
                )
                if save_generated_images:
                    info_images += f"<br>{download_links}"

                info_state = "COMPLETE"

            yield info_state, img, info_images
603 |
+
|
604 |
+
|
605 |
+
def dynamic_gpu_duration(func, duration, *args):
    """Run generator ``func(*args)`` inside a ZeroGPU allocation.

    ``duration`` is the number of GPU seconds requested from the Spaces
    runtime; the wrapped generator is returned already started so callers
    can ``yield from`` it directly.
    """

    # @torch.inference_mode()
    @spaces.GPU(duration=duration)
    def wrapped_func():
        # Delegate the entire generator so streamed updates pass through.
        yield from func(*args)

    return wrapped_func()
|
613 |
+
|
614 |
+
|
615 |
+
# No-op ZeroGPU task. NOTE(review): presumably used to trigger/validate a GPU
# allocation without doing work — confirm against where it is called.
@spaces.GPU
def dummy_gpu():
    return None
|
618 |
+
|
619 |
+
|
620 |
+
def sd_gen_generate_pipeline(*args):
    """Top-level Gradio callback that prepares LoRAs/scheduler and streams
    generation through a dynamic ZeroGPU allocation.

    Positional contract (set by the UI wiring): the last three args are
    ``load_lora_cpu``, ``verbose`` and ``gpu_duration``; args[7..20] are the
    seven LoRA name/scale pairs; args[21]/args[22] are sampler and schedule
    type. Yields ``(status_html, gallery, info_html)`` updates.
    """
    gpu_duration_arg = int(args[-1]) if args[-1] else 59
    verbose_arg = int(args[-2])
    load_lora_cpu = args[-3]
    generation_args = args[:-3]
    # Normalize the "None" dropdown sentinel to real None for each LoRA slot.
    lora_list = [
        None if item == "None" else item
        for item in [args[7], args[9], args[11], args[13], args[15], args[17], args[19]]
    ]
    lora_status = [None] * sd_gen.model.num_loras

    msg_load_lora = "Updating LoRAs in GPU..."
    if load_lora_cpu:
        msg_load_lora = "Updating LoRAs in CPU..."

    # Only announce a LoRA update when the selection actually changed.
    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * sd_gen.model.num_loras:
        yield msg_load_lora, gr.update(), gr.update()

    # Load lora in CPU
    if load_lora_cpu:
        lora_status = sd_gen.model.load_lora_on_the_fly(
            lora_A=lora_list[0], lora_scale_A=args[8],
            lora_B=lora_list[1], lora_scale_B=args[10],
            lora_C=lora_list[2], lora_scale_C=args[12],
            lora_D=lora_list[3], lora_scale_D=args[14],
            lora_E=lora_list[4], lora_scale_E=args[16],
            lora_F=lora_list[5], lora_scale_F=args[18],
            lora_G=lora_list[6], lora_scale_G=args[20],
        )
        print(lora_status)

    # Warn (without aborting) when the sampler/schedule combo is unsupported
    # by the loaded model class.
    sampler_name = args[21]
    schedule_type_name = args[22]
    _, _, msg_sampler = check_scheduler_compatibility(
        sd_gen.model.class_name, sampler_name, schedule_type_name
    )
    if msg_sampler:
        gr.Warning(msg_sampler)

    if verbose_arg:
        for status, lora in zip(lora_status, lora_list):
            if status:
                gr.Info(f"LoRA loaded in CPU: {lora}")
            elif status is not None:
                gr.Warning(f"Failed to load LoRA: {lora}")

    # Nothing was (re)loaded this call but LoRAs are already cached: tell the user.
    if lora_status == [None] * sd_gen.model.num_loras and sd_gen.model.lora_memory != [None] * sd_gen.model.num_loras and load_lora_cpu:
        lora_cache_msg = ", ".join(
            str(x) for x in sd_gen.model.lora_memory if x is not None
        )
        gr.Info(f"LoRAs in cache: {lora_cache_msg}")

    msg_request = f"Requesting {gpu_duration_arg}s. of GPU time.\nModel: {sd_gen.model.base_model_id}"
    if verbose_arg:
        gr.Info(msg_request)
        print(msg_request)
    yield msg_request.replace("\n", "<br>"), gr.update(), gr.update()

    start_time = time.time()

    # yield from sd_gen.generate_pipeline(*generation_args)
    yield from dynamic_gpu_duration(
        sd_gen.generate_pipeline,
        gpu_duration_arg,
        *generation_args,
    )

    end_time = time.time()
    execution_time = end_time - start_time
    msg_task_complete = (
        f"GPU task complete in: {int(round(execution_time, 0) + 1)} seconds"
    )

    if verbose_arg:
        gr.Info(msg_task_complete)
        print(msg_task_complete)

    yield msg_task_complete, gr.update(), gr.update()
|
698 |
+
|
699 |
+
|
700 |
+
@spaces.GPU(duration=15)
def process_upscale(image, upscaler_name, upscaler_size):
    """Upscale a PIL image with the selected upscaler and save it with EXIF.

    Returns the saved file path, or None when no image was provided.
    """
    if image is None:
        return None

    # Deferred imports: only needed when an upscale is actually requested.
    from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
    from stablepy import load_upscaler_model

    rgb_image = image.convert("RGB")
    exif_payload = extract_exif_data(rgb_image)

    model_ref = UPSCALER_DICT_GUI[upscaler_name]

    # URL-backed entries are fetched once, then referenced by local path.
    if "https://" in str(model_ref):
        weight_file = model_ref.split('/')[-1]
        local_path = f"./{DIRECTORY_UPSCALERS}/{weight_file}"
        if not os.path.exists(local_path):
            download_things(DIRECTORY_UPSCALERS, model_ref, HF_TOKEN)
        model_ref = local_path

    upscaler = load_upscaler_model(model=model_ref, tile=0, tile_overlap=8, device="cuda", half=True)
    upscaled_image = upscaler.upscale(rgb_image, upscaler_size, True)

    return save_pil_image_with_metadata(upscaled_image, f'{os.getcwd()}/up_images', exif_payload)
|
725 |
+
|
726 |
+
|
727 |
+
# https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
# The `.zerogpu = True` attribute marks these callables for the ZeroGPU runtime
# (workaround described in the discussion linked above).
dynamic_gpu_duration.zerogpu = True
sd_gen_generate_pipeline.zerogpu = True
# Single shared generator/state object used by the UI callbacks below.
sd_gen = GuiSD()
|
731 |
+
|
732 |
+
with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
|
733 |
+
gr.Markdown("# 🧩 DiffuseCraft")
|
734 |
+
gr.Markdown(SUBTITLE_GUI)
|
735 |
+
with gr.Tab("Generation"):
|
736 |
+
with gr.Row():
|
737 |
+
|
738 |
+
with gr.Column(scale=2):
|
739 |
+
|
740 |
+
def update_task_options(model_name, task_name):
|
741 |
+
new_choices = MODEL_TYPE_TASK[get_model_type(model_name)]
|
742 |
+
|
743 |
+
if task_name not in new_choices:
|
744 |
+
task_name = "txt2img"
|
745 |
+
|
746 |
+
return gr.update(value=task_name, choices=new_choices)
|
747 |
+
|
748 |
+
task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
|
749 |
+
model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
|
750 |
+
prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
|
751 |
+
neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, worst quality, low quality, very displeasing, (bad)")
|
752 |
+
with gr.Row(equal_height=False):
|
753 |
+
set_params_gui = gr.Button(value="↙️", variant="secondary", size="sm")
|
754 |
+
clear_prompt_gui = gr.Button(value="🗑️", variant="secondary", size="sm")
|
755 |
+
set_random_seed = gr.Button(value="🎲", variant="secondary", size="sm")
|
756 |
+
generate_button = gr.Button(value="GENERATE IMAGE", variant="primary")
|
757 |
+
|
758 |
+
model_name_gui.change(
|
759 |
+
update_task_options,
|
760 |
+
[model_name_gui, task_gui],
|
761 |
+
[task_gui],
|
762 |
+
)
|
763 |
+
|
764 |
+
load_model_gui = gr.HTML(elem_id="load_model", elem_classes="contain")
|
765 |
+
|
766 |
+
result_images = gr.Gallery(
|
767 |
+
label="Generated images",
|
768 |
+
show_label=False,
|
769 |
+
elem_id="gallery",
|
770 |
+
columns=[2],
|
771 |
+
rows=[2],
|
772 |
+
object_fit="contain",
|
773 |
+
# height="auto",
|
774 |
+
interactive=False,
|
775 |
+
preview=False,
|
776 |
+
selected_index=50,
|
777 |
+
)
|
778 |
+
|
779 |
+
actual_task_info = gr.HTML()
|
780 |
+
|
781 |
+
with gr.Row(equal_height=False, variant="default"):
|
782 |
+
gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
|
783 |
+
with gr.Column():
|
784 |
+
verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
|
785 |
+
load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")
|
786 |
+
|
787 |
+
with gr.Column(scale=1):
|
788 |
+
steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=28, label="Steps")
|
789 |
+
cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
|
790 |
+
sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
|
791 |
+
schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
|
792 |
+
img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
|
793 |
+
img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
|
794 |
+
seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
|
795 |
+
pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
|
796 |
+
with gr.Row():
|
797 |
+
clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
|
798 |
+
free_u_gui = gr.Checkbox(value=False, label="FreeU")
|
799 |
+
|
800 |
+
with gr.Row(equal_height=False):
|
801 |
+
|
802 |
+
                    def run_set_params_gui(base_prompt, name_model):
                        """Parse generation metadata pasted in the prompt box and
                        push the recovered values back into the UI controls.

                        Returns a list of gr.update objects in the fixed order of
                        ``valid_receptors`` (must match the outputs wiring of the
                        `set_params_gui.click` call). Unparseable fields keep
                        their defaults; per-field errors are printed and skipped.
                        """
                        valid_receptors = {  # default values
                            "prompt": gr.update(value=base_prompt),
                            "neg_prompt": gr.update(value=""),
                            "Steps": gr.update(value=30),
                            "width": gr.update(value=1024),
                            "height": gr.update(value=1024),
                            "Seed": gr.update(value=-1),
                            "Sampler": gr.update(value="Euler"),
                            "CFG scale": gr.update(value=7.),  # cfg
                            "Clip skip": gr.update(value=True),
                            "Model": gr.update(value=name_model),
                            "Schedule type": gr.update(value="Automatic"),
                            "PAG": gr.update(value=.0),
                            "FreeU": gr.update(value=False),
                        }
                        valid_keys = list(valid_receptors.keys())

                        parameters = extract_parameters(base_prompt)
                        # print(parameters)

                        # A1111-style metadata may fuse schedule type into the
                        # sampler name ("Euler Karras"); split it back out.
                        if "Sampler" in parameters:
                            value_sampler = parameters["Sampler"]
                            for s_type in SCHEDULE_TYPE_OPTIONS:
                                if s_type in value_sampler:
                                    value_sampler = value_sampler.replace(s_type, "").strip()
                                    parameters["Sampler"] = value_sampler
                                    parameters["Schedule type"] = s_type

                        for key, val in parameters.items():
                            # print(val)
                            if key in valid_keys:
                                try:
                                    if key == "Sampler":
                                        # Unknown samplers keep the default.
                                        if val not in scheduler_names:
                                            continue
                                    if key == "Schedule type":
                                        if val not in SCHEDULE_TYPE_OPTIONS:
                                            val = "Automatic"
                                    elif key == "Clip skip":
                                        if "," in str(val):
                                            val = val.replace(",", "")
                                        # Metadata stores clip skip as a count;
                                        # the UI uses a boolean ("layer 2").
                                        if int(val) >= 2:
                                            val = True
                                    if key == "prompt":
                                        if ">" in val and "<" in val:
                                            val = re.sub(r'<[^>]+>', '', val)
                                            print("Removed LoRA written in the prompt")
                                    if key in ["prompt", "neg_prompt"]:
                                        # Collapse repeated commas/whitespace.
                                        val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
                                    if key in ["Steps", "width", "height", "Seed"]:
                                        val = int(val)
                                    if key == "FreeU":
                                        val = True
                                    if key in ["CFG scale", "PAG"]:
                                        val = float(val)
                                    if key == "Model":
                                        # Match by substring against the known
                                        # model list; otherwise keep current.
                                        filtered_models = [m for m in model_list if val in m]
                                        if filtered_models:
                                            val = filtered_models[0]
                                        else:
                                            val = name_model
                                    if key == "Seed":
                                        # Seed is intentionally never restored.
                                        continue
                                    valid_receptors[key] = gr.update(value=val)
                                    # print(val, type(val))
                                    # print(valid_receptors)
                                except Exception as e:
                                    print(str(e))
                        return [value for value in valid_receptors.values()]
|
872 |
+
|
873 |
+
set_params_gui.click(
|
874 |
+
run_set_params_gui, [prompt_gui, model_name_gui], [
|
875 |
+
prompt_gui,
|
876 |
+
neg_prompt_gui,
|
877 |
+
steps_gui,
|
878 |
+
img_width_gui,
|
879 |
+
img_height_gui,
|
880 |
+
seed_gui,
|
881 |
+
sampler_gui,
|
882 |
+
cfg_gui,
|
883 |
+
clip_skip_gui,
|
884 |
+
model_name_gui,
|
885 |
+
schedule_type_gui,
|
886 |
+
pag_scale_gui,
|
887 |
+
free_u_gui,
|
888 |
+
],
|
889 |
+
)
|
890 |
+
|
891 |
+
def run_clear_prompt_gui():
|
892 |
+
return gr.update(value=""), gr.update(value="")
|
893 |
+
clear_prompt_gui.click(
|
894 |
+
run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
|
895 |
+
)
|
896 |
+
|
897 |
+
def run_set_random_seed():
|
898 |
+
return -1
|
899 |
+
set_random_seed.click(
|
900 |
+
run_set_random_seed, [], seed_gui
|
901 |
+
)
|
902 |
+
|
903 |
+
num_images_gui = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Images")
|
904 |
+
prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
|
905 |
+
vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
|
906 |
+
|
907 |
+
with gr.Accordion("Hires fix", open=False, visible=True):
|
908 |
+
|
909 |
+
upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
|
910 |
+
upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
|
911 |
+
upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=0, label="Upscaler Tile Size", info="0 = no tiling")
|
912 |
+
upscaler_tile_overlap_gui = gr.Slider(minimum=0, maximum=48, step=1, value=8, label="Upscaler Tile Overlap")
|
913 |
+
hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
|
914 |
+
hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
|
915 |
+
hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
|
916 |
+
hires_schedule_list = ["Use same schedule type"] + SCHEDULE_TYPE_OPTIONS
|
917 |
+
hires_schedule_type_gui = gr.Dropdown(label="Hires Schedule type", choices=hires_schedule_list, value=hires_schedule_list[0])
|
918 |
+
hires_guidance_scale_gui = gr.Slider(minimum=-1., maximum=30., step=0.5, value=-1., label="Hires CFG", info="If the value is -1, the main CFG will be used")
|
919 |
+
hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
|
920 |
+
hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
|
921 |
+
|
922 |
+
with gr.Accordion("LoRA", open=False, visible=True):
|
923 |
+
|
924 |
+
def lora_dropdown(label, visible=True):
|
925 |
+
return gr.Dropdown(label=label, choices=lora_model_list, value="None", allow_custom_value=True, visible=visible)
|
926 |
+
|
927 |
+
def lora_scale_slider(label, visible=True):
|
928 |
+
return gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label=label, visible=visible)
|
929 |
+
|
930 |
+
lora1_gui = lora_dropdown("Lora1")
|
931 |
+
lora_scale_1_gui = lora_scale_slider("Lora Scale 1")
|
932 |
+
lora2_gui = lora_dropdown("Lora2")
|
933 |
+
lora_scale_2_gui = lora_scale_slider("Lora Scale 2")
|
934 |
+
lora3_gui = lora_dropdown("Lora3")
|
935 |
+
lora_scale_3_gui = lora_scale_slider("Lora Scale 3")
|
936 |
+
lora4_gui = lora_dropdown("Lora4")
|
937 |
+
lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
|
938 |
+
lora5_gui = lora_dropdown("Lora5")
|
939 |
+
lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
|
940 |
+
lora6_gui = lora_dropdown("Lora6", visible=False)
|
941 |
+
lora_scale_6_gui = lora_scale_slider("Lora Scale 6", visible=False)
|
942 |
+
lora7_gui = lora_dropdown("Lora7", visible=False)
|
943 |
+
lora_scale_7_gui = lora_scale_slider("Lora Scale 7", visible=False)
|
944 |
+
|
945 |
+
with gr.Accordion("From URL", open=False, visible=True):
|
946 |
+
text_lora = gr.Textbox(
|
947 |
+
label="LoRA's download URL",
|
948 |
+
placeholder="https://civitai.com/api/download/models/28907",
|
949 |
+
lines=1,
|
950 |
+
info="It has to be .safetensors files, and you can also download them from Hugging Face.",
|
951 |
+
)
|
952 |
+
romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=False)
|
953 |
+
button_lora = gr.Button("Get and Refresh the LoRA Lists")
|
954 |
+
new_lora_status = gr.HTML()
|
955 |
+
button_lora.click(
|
956 |
+
get_my_lora,
|
957 |
+
[text_lora, romanize_text],
|
958 |
+
[lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, lora6_gui, lora7_gui, new_lora_status]
|
959 |
+
)
|
960 |
+
|
961 |
+
with gr.Accordion("Face restoration", open=False, visible=True):
|
962 |
+
|
963 |
+
face_rest_options = [None] + FACE_RESTORATION_MODELS
|
964 |
+
|
965 |
+
face_restoration_model_gui = gr.Dropdown(label="Face restoration model", choices=face_rest_options, value=face_rest_options[0])
|
966 |
+
face_restoration_visibility_gui = gr.Slider(minimum=0., maximum=1., step=0.001, value=1., label="Visibility")
|
967 |
+
face_restoration_weight_gui = gr.Slider(minimum=0., maximum=1., step=0.001, value=.5, label="Weight", info="(0 = maximum effect, 1 = minimum effect)")
|
968 |
+
|
969 |
+
with gr.Accordion("IP-Adapter", open=False, visible=True):
|
970 |
+
|
971 |
+
IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
|
972 |
+
MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
|
973 |
+
|
974 |
+
with gr.Accordion("IP-Adapter 1", open=False, visible=True):
|
975 |
+
image_ip1 = gr.Image(label="IP Image", type="filepath")
|
976 |
+
mask_ip1 = gr.Image(label="IP Mask", type="filepath")
|
977 |
+
model_ip1 = gr.Dropdown(value="plus_face", label="Model", choices=IP_MODELS)
|
978 |
+
mode_ip1 = gr.Dropdown(value="original", label="Mode", choices=MODE_IP_OPTIONS)
|
979 |
+
scale_ip1 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
|
980 |
+
with gr.Accordion("IP-Adapter 2", open=False, visible=True):
|
981 |
+
image_ip2 = gr.Image(label="IP Image", type="filepath")
|
982 |
+
mask_ip2 = gr.Image(label="IP Mask (optional)", type="filepath")
|
983 |
+
model_ip2 = gr.Dropdown(value="base", label="Model", choices=IP_MODELS)
|
984 |
+
mode_ip2 = gr.Dropdown(value="style", label="Mode", choices=MODE_IP_OPTIONS)
|
985 |
+
scale_ip2 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
|
986 |
+
|
987 |
+
with gr.Accordion("ControlNet / Img2img / Inpaint", open=False, visible=True):
|
988 |
+
image_control = gr.Image(label="Image ControlNet/Inpaint/Img2img", type="filepath")
|
989 |
+
image_mask_gui = gr.Image(label="Image Mask", type="filepath")
|
990 |
+
strength_gui = gr.Slider(
|
991 |
+
minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
|
992 |
+
info="This option adjusts the level of changes for img2img, repaint and inpaint."
|
993 |
+
)
|
994 |
+
image_resolution_gui = gr.Slider(
|
995 |
+
minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
|
996 |
+
info="The maximum proportional size of the generated image based on the uploaded image."
|
997 |
+
)
|
998 |
+
controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
|
999 |
+
control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
|
1000 |
+
control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
|
1001 |
+
control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
|
1002 |
+
preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=TASK_AND_PREPROCESSORS["canny"])
|
1003 |
+
|
1004 |
+
def change_preprocessor_choices(task):
    """Refresh the preprocessor dropdown when the task changes.

    Maps the UI task label to its stablepy task key and returns a
    ``gr.update`` whose choices are that task's preprocessor list,
    falling back to the "canny" preprocessor list for tasks without
    their own entry. The first choice becomes the selected value.
    """
    task = TASK_STABLEPY[task]
    # dict.get with a default replaces the `task in d.keys()` check
    # (one lookup instead of a membership test plus an index).
    choices_task = TASK_AND_PREPROCESSORS.get(task, TASK_AND_PREPROCESSORS["canny"])
    return gr.update(choices=choices_task, value=choices_task[0])
|
1011 |
+
task_gui.change(
|
1012 |
+
change_preprocessor_choices,
|
1013 |
+
[task_gui],
|
1014 |
+
[preprocessor_name_gui],
|
1015 |
+
)
|
1016 |
+
|
1017 |
+
preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
|
1018 |
+
low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
|
1019 |
+
high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
|
1020 |
+
value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
|
1021 |
+
distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
|
1022 |
+
recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
|
1023 |
+
tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
|
1024 |
+
|
1025 |
+
with gr.Accordion("T2I adapter", open=False, visible=False):
|
1026 |
+
t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
|
1027 |
+
adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
|
1028 |
+
adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
|
1029 |
+
|
1030 |
+
with gr.Accordion("Styles", open=False, visible=True):
|
1031 |
+
|
1032 |
+
try:
|
1033 |
+
style_names_found = sd_gen.model.STYLE_NAMES
|
1034 |
+
except Exception:
|
1035 |
+
style_names_found = STYLE_NAMES
|
1036 |
+
|
1037 |
+
style_prompt_gui = gr.Dropdown(
|
1038 |
+
style_names_found,
|
1039 |
+
multiselect=True,
|
1040 |
+
value=None,
|
1041 |
+
label="Style Prompt",
|
1042 |
+
interactive=True,
|
1043 |
+
)
|
1044 |
+
style_json_gui = gr.File(label="Style JSON File")
|
1045 |
+
style_button = gr.Button("Load styles")
|
1046 |
+
|
1047 |
+
def load_json_style_file(json):
    """Apply a styles JSON file to the loaded model and refresh the
    Style Prompt dropdown choices.

    If no model has been loaded yet, the user is notified and the
    built-in style list is restored instead.

    NOTE(review): the parameter name ``json`` shadows the stdlib
    module; kept as-is because it is part of the callback signature.
    """
    if sd_gen.model:
        sd_gen.model.load_style_file(json)
        gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
        return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)

    # No model yet: warn and fall back to the default style names.
    gr.Info("First load the model")
    return gr.update(value=None, choices=STYLE_NAMES)
|
1055 |
+
|
1056 |
+
style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
|
1057 |
+
|
1058 |
+
with gr.Accordion("Textual inversion", open=False, visible=False):
|
1059 |
+
active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
|
1060 |
+
|
1061 |
+
with gr.Accordion("Detailfix", open=False, visible=True):
|
1062 |
+
|
1063 |
+
# Adetailer Inpaint Only
|
1064 |
+
adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True)
|
1065 |
+
|
1066 |
+
# Adetailer Verbose
|
1067 |
+
adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False)
|
1068 |
+
|
1069 |
+
# Adetailer Sampler
|
1070 |
+
adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
|
1071 |
+
|
1072 |
+
with gr.Accordion("Detailfix A", open=False, visible=True):
|
1073 |
+
# Adetailer A
|
1074 |
+
adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
|
1075 |
+
prompt_ad_a_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
|
1076 |
+
negative_prompt_ad_a_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
|
1077 |
+
strength_ad_a_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
|
1078 |
+
face_detector_ad_a_gui = gr.Checkbox(label="Face detector", value=True)
|
1079 |
+
person_detector_ad_a_gui = gr.Checkbox(label="Person detector", value=False)
|
1080 |
+
hand_detector_ad_a_gui = gr.Checkbox(label="Hand detector", value=False)
|
1081 |
+
mask_dilation_a_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
|
1082 |
+
mask_blur_a_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
|
1083 |
+
mask_padding_a_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
|
1084 |
+
|
1085 |
+
with gr.Accordion("Detailfix B", open=False, visible=True):
|
1086 |
+
# Adetailer B
|
1087 |
+
adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
|
1088 |
+
prompt_ad_b_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
|
1089 |
+
negative_prompt_ad_b_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
|
1090 |
+
strength_ad_b_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
|
1091 |
+
face_detector_ad_b_gui = gr.Checkbox(label="Face detector", value=False)
|
1092 |
+
person_detector_ad_b_gui = gr.Checkbox(label="Person detector", value=True)
|
1093 |
+
hand_detector_ad_b_gui = gr.Checkbox(label="Hand detector", value=False)
|
1094 |
+
mask_dilation_b_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
|
1095 |
+
mask_blur_b_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
|
1096 |
+
mask_padding_b_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
|
1097 |
+
|
1098 |
+
with gr.Accordion("Other settings", open=False, visible=True):
|
1099 |
+
schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
|
1100 |
+
guidance_rescale_gui = gr.Number(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
|
1101 |
+
save_generated_images_gui = gr.Checkbox(value=True, label="Create a download link for the images")
|
1102 |
+
filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
|
1103 |
+
hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
|
1104 |
+
hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
|
1105 |
+
generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
|
1106 |
+
|
1107 |
+
with gr.Accordion("More settings", open=False, visible=False):
|
1108 |
+
loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
|
1109 |
+
retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
|
1110 |
+
leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
|
1111 |
+
disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
|
1112 |
+
display_images_gui = gr.Checkbox(value=False, label="Display Images")
|
1113 |
+
image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
|
1114 |
+
image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
|
1115 |
+
retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
|
1116 |
+
retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
|
1117 |
+
retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
|
1118 |
+
xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
|
1119 |
+
|
1120 |
+
with gr.Accordion("Examples and help", open=False, visible=True):
|
1121 |
+
gr.Markdown(HELP_GUI)
|
1122 |
+
gr.Markdown(EXAMPLES_GUI_HELP)
|
1123 |
+
gr.Examples(
|
1124 |
+
examples=EXAMPLES_GUI,
|
1125 |
+
fn=sd_gen.generate_pipeline,
|
1126 |
+
inputs=[
|
1127 |
+
prompt_gui,
|
1128 |
+
neg_prompt_gui,
|
1129 |
+
steps_gui,
|
1130 |
+
cfg_gui,
|
1131 |
+
seed_gui,
|
1132 |
+
lora1_gui,
|
1133 |
+
lora_scale_1_gui,
|
1134 |
+
sampler_gui,
|
1135 |
+
img_height_gui,
|
1136 |
+
img_width_gui,
|
1137 |
+
model_name_gui,
|
1138 |
+
task_gui,
|
1139 |
+
image_control,
|
1140 |
+
image_resolution_gui,
|
1141 |
+
strength_gui,
|
1142 |
+
control_net_output_scaling_gui,
|
1143 |
+
control_net_start_threshold_gui,
|
1144 |
+
control_net_stop_threshold_gui,
|
1145 |
+
prompt_syntax_gui,
|
1146 |
+
upscaler_model_path_gui,
|
1147 |
+
gpu_duration_gui,
|
1148 |
+
load_lora_cpu_gui,
|
1149 |
+
],
|
1150 |
+
outputs=[load_model_gui, result_images, actual_task_info],
|
1151 |
+
cache_examples=False,
|
1152 |
+
)
|
1153 |
+
gr.Markdown(RESOURCES)
|
1154 |
+
|
1155 |
+
with gr.Tab("Inpaint mask maker", render=True):
|
1156 |
+
|
1157 |
+
with gr.Row():
|
1158 |
+
with gr.Column(scale=2):
|
1159 |
+
image_base = gr.ImageEditor(
|
1160 |
+
sources=["upload", "clipboard"],
|
1161 |
+
# crop_size="1:1",
|
1162 |
+
# enable crop (or disable it)
|
1163 |
+
# transforms=["crop"],
|
1164 |
+
brush=gr.Brush(
|
1165 |
+
default_size="16", # or leave it as 'auto'
|
1166 |
+
color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
|
1167 |
+
# default_color="black", # html names are supported
|
1168 |
+
colors=[
|
1169 |
+
"rgba(0, 0, 0, 1)", # rgb(a)
|
1170 |
+
"rgba(0, 0, 0, 0.1)",
|
1171 |
+
"rgba(255, 255, 255, 0.1)",
|
1172 |
+
# "hsl(360, 120, 120)" # in fact any valid colorstring
|
1173 |
+
]
|
1174 |
+
),
|
1175 |
+
eraser=gr.Eraser(default_size="16")
|
1176 |
+
)
|
1177 |
+
invert_mask = gr.Checkbox(value=False, label="Invert mask")
|
1178 |
+
btn = gr.Button("Create mask")
|
1179 |
+
with gr.Column(scale=1):
|
1180 |
+
img_source = gr.Image(interactive=False)
|
1181 |
+
img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
|
1182 |
+
btn_send = gr.Button("Send to the first tab")
|
1183 |
+
|
1184 |
+
btn.click(create_mask_now, [image_base, invert_mask], [img_source, img_result])
|
1185 |
+
|
1186 |
+
def send_img(img_source, img_result):
    """Forward the mask maker's source image and mask unchanged.

    Identity pass-through used as a Gradio ``.click`` callback so the
    two outputs can be routed to the first tab's image/mask inputs.
    """
    forwarded = (img_source, img_result)
    return forwarded
|
1188 |
+
btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
|
1189 |
+
|
1190 |
+
with gr.Tab("PNG Info"):
|
1191 |
+
|
1192 |
+
with gr.Row():
|
1193 |
+
with gr.Column():
|
1194 |
+
image_metadata = gr.Image(label="Image with metadata", type="pil", sources=["upload"])
|
1195 |
+
|
1196 |
+
with gr.Column():
|
1197 |
+
result_metadata = gr.Textbox(label="Metadata", show_label=True, show_copy_button=True, interactive=False, container=True, max_lines=99)
|
1198 |
+
|
1199 |
+
image_metadata.change(
|
1200 |
+
fn=extract_exif_data,
|
1201 |
+
inputs=[image_metadata],
|
1202 |
+
outputs=[result_metadata],
|
1203 |
+
)
|
1204 |
+
|
1205 |
+
with gr.Tab("Upscaler"):
|
1206 |
+
|
1207 |
+
with gr.Row():
|
1208 |
+
with gr.Column():
|
1209 |
+
|
1210 |
+
USCALER_TAB_KEYS = [name for name in UPSCALER_KEYS[9:]]
|
1211 |
+
|
1212 |
+
image_up_tab = gr.Image(label="Image", type="pil", sources=["upload"])
|
1213 |
+
upscaler_tab = gr.Dropdown(label="Upscaler", choices=USCALER_TAB_KEYS, value=USCALER_TAB_KEYS[5])
|
1214 |
+
upscaler_size_tab = gr.Slider(minimum=1., maximum=4., step=0.1, value=1.1, label="Upscale by")
|
1215 |
+
generate_button_up_tab = gr.Button(value="START UPSCALE", variant="primary")
|
1216 |
+
|
1217 |
+
with gr.Column():
|
1218 |
+
result_up_tab = gr.Image(label="Result", type="pil", interactive=False, format="png")
|
1219 |
+
|
1220 |
+
generate_button_up_tab.click(
|
1221 |
+
fn=process_upscale,
|
1222 |
+
inputs=[image_up_tab, upscaler_tab, upscaler_size_tab],
|
1223 |
+
outputs=[result_up_tab],
|
1224 |
+
)
|
1225 |
+
|
1226 |
+
with gr.Tab("Preprocessor", render=True):
|
1227 |
+
preprocessor_tab()
|
1228 |
+
|
1229 |
+
generate_button.click(
|
1230 |
+
fn=sd_gen.load_new_model,
|
1231 |
+
inputs=[
|
1232 |
+
model_name_gui,
|
1233 |
+
vae_model_gui,
|
1234 |
+
task_gui,
|
1235 |
+
controlnet_model_gui,
|
1236 |
+
],
|
1237 |
+
outputs=[load_model_gui],
|
1238 |
+
queue=True,
|
1239 |
+
show_progress="minimal",
|
1240 |
+
).success(
|
1241 |
+
fn=sd_gen_generate_pipeline, # fn=sd_gen.generate_pipeline,
|
1242 |
+
inputs=[
|
1243 |
+
prompt_gui,
|
1244 |
+
neg_prompt_gui,
|
1245 |
+
num_images_gui,
|
1246 |
+
steps_gui,
|
1247 |
+
cfg_gui,
|
1248 |
+
clip_skip_gui,
|
1249 |
+
seed_gui,
|
1250 |
+
lora1_gui,
|
1251 |
+
lora_scale_1_gui,
|
1252 |
+
lora2_gui,
|
1253 |
+
lora_scale_2_gui,
|
1254 |
+
lora3_gui,
|
1255 |
+
lora_scale_3_gui,
|
1256 |
+
lora4_gui,
|
1257 |
+
lora_scale_4_gui,
|
1258 |
+
lora5_gui,
|
1259 |
+
lora_scale_5_gui,
|
1260 |
+
lora6_gui,
|
1261 |
+
lora_scale_6_gui,
|
1262 |
+
lora7_gui,
|
1263 |
+
lora_scale_7_gui,
|
1264 |
+
sampler_gui,
|
1265 |
+
schedule_type_gui,
|
1266 |
+
schedule_prediction_type_gui,
|
1267 |
+
img_height_gui,
|
1268 |
+
img_width_gui,
|
1269 |
+
model_name_gui,
|
1270 |
+
vae_model_gui,
|
1271 |
+
task_gui,
|
1272 |
+
image_control,
|
1273 |
+
preprocessor_name_gui,
|
1274 |
+
preprocess_resolution_gui,
|
1275 |
+
image_resolution_gui,
|
1276 |
+
style_prompt_gui,
|
1277 |
+
style_json_gui,
|
1278 |
+
image_mask_gui,
|
1279 |
+
strength_gui,
|
1280 |
+
low_threshold_gui,
|
1281 |
+
high_threshold_gui,
|
1282 |
+
value_threshold_gui,
|
1283 |
+
distance_threshold_gui,
|
1284 |
+
recolor_gamma_correction_gui,
|
1285 |
+
tile_blur_sigma_gui,
|
1286 |
+
control_net_output_scaling_gui,
|
1287 |
+
control_net_start_threshold_gui,
|
1288 |
+
control_net_stop_threshold_gui,
|
1289 |
+
active_textual_inversion_gui,
|
1290 |
+
prompt_syntax_gui,
|
1291 |
+
upscaler_model_path_gui,
|
1292 |
+
upscaler_increases_size_gui,
|
1293 |
+
upscaler_tile_size_gui,
|
1294 |
+
upscaler_tile_overlap_gui,
|
1295 |
+
hires_steps_gui,
|
1296 |
+
hires_denoising_strength_gui,
|
1297 |
+
hires_sampler_gui,
|
1298 |
+
hires_prompt_gui,
|
1299 |
+
hires_negative_prompt_gui,
|
1300 |
+
hires_before_adetailer_gui,
|
1301 |
+
hires_after_adetailer_gui,
|
1302 |
+
hires_schedule_type_gui,
|
1303 |
+
hires_guidance_scale_gui,
|
1304 |
+
controlnet_model_gui,
|
1305 |
+
loop_generation_gui,
|
1306 |
+
leave_progress_bar_gui,
|
1307 |
+
disable_progress_bar_gui,
|
1308 |
+
image_previews_gui,
|
1309 |
+
display_images_gui,
|
1310 |
+
save_generated_images_gui,
|
1311 |
+
filename_pattern_gui,
|
1312 |
+
image_storage_location_gui,
|
1313 |
+
retain_compel_previous_load_gui,
|
1314 |
+
retain_detailfix_model_previous_load_gui,
|
1315 |
+
retain_hires_model_previous_load_gui,
|
1316 |
+
t2i_adapter_preprocessor_gui,
|
1317 |
+
adapter_conditioning_scale_gui,
|
1318 |
+
adapter_conditioning_factor_gui,
|
1319 |
+
xformers_memory_efficient_attention_gui,
|
1320 |
+
free_u_gui,
|
1321 |
+
generator_in_cpu_gui,
|
1322 |
+
adetailer_inpaint_only_gui,
|
1323 |
+
adetailer_verbose_gui,
|
1324 |
+
adetailer_sampler_gui,
|
1325 |
+
adetailer_active_a_gui,
|
1326 |
+
prompt_ad_a_gui,
|
1327 |
+
negative_prompt_ad_a_gui,
|
1328 |
+
strength_ad_a_gui,
|
1329 |
+
face_detector_ad_a_gui,
|
1330 |
+
person_detector_ad_a_gui,
|
1331 |
+
hand_detector_ad_a_gui,
|
1332 |
+
mask_dilation_a_gui,
|
1333 |
+
mask_blur_a_gui,
|
1334 |
+
mask_padding_a_gui,
|
1335 |
+
adetailer_active_b_gui,
|
1336 |
+
prompt_ad_b_gui,
|
1337 |
+
negative_prompt_ad_b_gui,
|
1338 |
+
strength_ad_b_gui,
|
1339 |
+
face_detector_ad_b_gui,
|
1340 |
+
person_detector_ad_b_gui,
|
1341 |
+
hand_detector_ad_b_gui,
|
1342 |
+
mask_dilation_b_gui,
|
1343 |
+
mask_blur_b_gui,
|
1344 |
+
mask_padding_b_gui,
|
1345 |
+
retain_task_cache_gui,
|
1346 |
+
guidance_rescale_gui,
|
1347 |
+
image_ip1,
|
1348 |
+
mask_ip1,
|
1349 |
+
model_ip1,
|
1350 |
+
mode_ip1,
|
1351 |
+
scale_ip1,
|
1352 |
+
image_ip2,
|
1353 |
+
mask_ip2,
|
1354 |
+
model_ip2,
|
1355 |
+
mode_ip2,
|
1356 |
+
scale_ip2,
|
1357 |
+
pag_scale_gui,
|
1358 |
+
face_restoration_model_gui,
|
1359 |
+
face_restoration_visibility_gui,
|
1360 |
+
face_restoration_weight_gui,
|
1361 |
+
load_lora_cpu_gui,
|
1362 |
+
verbose_info_gui,
|
1363 |
+
gpu_duration_gui,
|
1364 |
+
],
|
1365 |
+
outputs=[load_model_gui, result_images, actual_task_info],
|
1366 |
+
queue=True,
|
1367 |
+
show_progress="minimal",
|
1368 |
+
)
|
1369 |
+
|
1370 |
+
app.queue()
|
1371 |
|
1372 |
+
app.launch(
|
1373 |
+
show_error=True,
|
1374 |
+
debug=True,
|
1375 |
+
allowed_paths=["./images/"],
|
|
|
1376 |
)
|