r3gm committed on
Commit
bf1d964
1 Parent(s): 91283c2

Update app.py

Files changed (1)
  1. app.py +118 -52
app.py CHANGED
@@ -5,6 +5,7 @@ from stablepy import (
     SCHEDULE_TYPE_OPTIONS,
     SCHEDULE_PREDICTION_TYPE_OPTIONS,
     check_scheduler_compatibility,
+    TASK_AND_PREPROCESSORS,
 )
 from constants import (
     DIRECTORY_MODELS,
@@ -19,7 +20,6 @@ from constants import (
     DOWNLOAD_EMBEDS,
     CIVITAI_API_KEY,
     HF_TOKEN,
-    PREPROCESSOR_CONTROLNET,
     TASK_STABLEPY,
     TASK_MODEL_LIST,
     UPSCALER_DICT_GUI,
@@ -34,6 +34,7 @@ from constants import (
     EXAMPLES_GUI_HELP,
     EXAMPLES_GUI,
     RESOURCES,
+    DIFFUSERS_CONTROLNET_MODEL,
 )
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
 import torch
@@ -60,15 +61,18 @@ from utils import (
     html_template_message,
     escape_html,
 )
+from image_processor import preprocessor_tab
 from datetime import datetime
 import gradio as gr
 import logging
 import diffusers
 import warnings
 from stablepy import logger
+from diffusers import FluxPipeline
 # import urllib.parse
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
+torch.backends.cuda.matmul.allow_tf32 = True
 # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
 print(os.getenv("SPACES_ZERO_GPU"))
 
@@ -108,6 +112,16 @@ vae_model_list.insert(0, "None")
 
 print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
 
+flux_repo = "camenduru/FLUX.1-dev-diffusers"
+flux_pipe = FluxPipeline.from_pretrained(
+    flux_repo,
+    transformer=None,
+    torch_dtype=torch.bfloat16,
+).to("cuda")
+components = flux_pipe.components
+components.pop("transformer", None)
+delete_model(flux_repo)
+
 #######################
 # GUI
 #######################
@@ -148,12 +162,12 @@ class GuiSD:
         ] + [model_name]
         print(self.inventory)
 
-    def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
-
-        self.update_storage_models()
+    def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
 
         # download link model > model_name
 
+        self.update_storage_models()
+
         vae_model = vae_model if vae_model != "None" else None
         model_type = get_model_type(model_name)
         dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
@@ -205,17 +219,19 @@
                 vae_model=vae_model,
                 type_model_precision=dtype_model,
                 retain_task_model_in_cache=False,
+                controlnet_model=controlnet_model,
                 device="cpu",
+                env_components=components,
             )
+            self.model.advanced_params(image_preprocessor_cuda_active=True)
         else:
-
             if self.model.base_model_id != model_name:
                 load_now_time = datetime.now()
                 elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)
 
-                if elapsed_time <= 8:
+                if elapsed_time <= 9:
                     print("Waiting for the previous model's time ops...")
-                    time.sleep(8-elapsed_time)
+                    time.sleep(9 - elapsed_time)
 
             self.model.device = torch.device("cpu")
             self.model.load_pipe(
@@ -224,6 +240,7 @@
                 vae_model=vae_model,
                 type_model_precision=dtype_model,
                 retain_task_model_in_cache=False,
+                controlnet_model=controlnet_model,
             )
 
             end_time = time.time()
@@ -260,6 +277,10 @@
         lora_scale4,
         lora5,
         lora_scale5,
+        lora6,
+        lora_scale6,
+        lora7,
+        lora_scale7,
         sampler,
         schedule_type,
         schedule_prediction_type,
@@ -280,6 +301,8 @@
         high_threshold,
         value_threshold,
         distance_threshold,
+        recolor_gamma_correction,
+        tile_blur_sigma,
         controlnet_output_scaling_in_unet,
         controlnet_start_threshold,
         controlnet_stop_threshold,
@@ -296,6 +319,9 @@
         hires_negative_prompt,
         hires_before_adetailer,
         hires_after_adetailer,
+        hires_schedule_type,
+        hires_guidance_scale,
+        controlnet_model,
         loop_generation,
         leave_progress_bar,
         disable_progress_bar,
@@ -337,6 +363,7 @@
         mask_blur_b,
         mask_padding_b,
         retain_task_cache_gui,
+        guidance_rescale,
         image_ip1,
         mask_ip1,
         model_ip1,
@@ -353,7 +380,7 @@
         yield info_state, gr.update(), gr.update()
 
         vae_model = vae_model if vae_model != "None" else None
-        loras_list = [lora1, lora2, lora3, lora4, lora5]
+        loras_list = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
         vae_msg = f"VAE: {vae_model}" if vae_model else ""
         msg_lora = ""
 
@@ -456,6 +483,8 @@
             "high_threshold": high_threshold,
             "value_threshold": value_threshold,
             "distance_threshold": distance_threshold,
+            "recolor_gamma_correction": float(recolor_gamma_correction),
+            "tile_blur_sigma": int(tile_blur_sigma),
             "lora_A": lora1 if lora1 != "None" else None,
             "lora_scale_A": lora_scale1,
             "lora_B": lora2 if lora2 != "None" else None,
@@ -466,6 +495,10 @@
             "lora_scale_D": lora_scale4,
             "lora_E": lora5 if lora5 != "None" else None,
             "lora_scale_E": lora_scale5,
+            "lora_F": lora6 if lora6 != "None" else None,
+            "lora_scale_F": lora_scale6,
+            "lora_G": lora7 if lora7 != "None" else None,
+            "lora_scale_G": lora_scale7,
             "textual_inversion": embed_list if textual_inversion else [],
             "syntax_weights": syntax_weights,  # "Classic"
             "sampler": sampler,
@@ -507,6 +540,8 @@
             "hires_sampler": hires_sampler,
             "hires_before_adetailer": hires_before_adetailer,
             "hires_after_adetailer": hires_after_adetailer,
+            "hires_schedule_type": hires_schedule_type,
+            "hires_guidance_scale": hires_guidance_scale,
             "ip_adapter_image": params_ip_img,
             "ip_adapter_mask": params_ip_msk,
             "ip_adapter_model": params_ip_model,
@@ -514,8 +549,12 @@
             "ip_adapter_scale": params_ip_scale,
         }
 
+        # kwargs for diffusers pipeline
+        if guidance_rescale:
+            pipe_params["guidance_rescale"] = guidance_rescale
+
         self.model.device = torch.device("cuda:0")
-        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
             self.model.pipe.transformer.to(self.model.device)
             print("transformer to cuda")
 
@@ -543,7 +582,7 @@
         if msg_lora:
             info_images += msg_lora
 
-        info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[0]) + "<br>-------<br>"
+        info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[-1]) + "<br>-------<br>"
 
         download_links = "<br>".join(
             [
@@ -575,37 +614,38 @@ def dummy_gpu():
 
 
 def sd_gen_generate_pipeline(*args):
-
     gpu_duration_arg = int(args[-1]) if args[-1] else 59
     verbose_arg = int(args[-2])
     load_lora_cpu = args[-3]
     generation_args = args[:-3]
     lora_list = [
         None if item == "None" else item
-        for item in [args[7], args[9], args[11], args[13], args[15]]
+        for item in [args[7], args[9], args[11], args[13], args[15], args[17], args[19]]
     ]
-    lora_status = [None] * 5
+    lora_status = [None] * sd_gen.model.num_loras
 
     msg_load_lora = "Updating LoRAs in GPU..."
     if load_lora_cpu:
-        msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
+        msg_load_lora = "Updating LoRAs in CPU..."
 
-    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
+    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * sd_gen.model.num_loras:
         yield msg_load_lora, gr.update(), gr.update()
 
         # Load lora in CPU
         if load_lora_cpu:
-            lora_status = sd_gen.model.lora_merge(
+            lora_status = sd_gen.model.load_lora_on_the_fly(
                 lora_A=lora_list[0], lora_scale_A=args[8],
                 lora_B=lora_list[1], lora_scale_B=args[10],
                 lora_C=lora_list[2], lora_scale_C=args[12],
                 lora_D=lora_list[3], lora_scale_D=args[14],
                 lora_E=lora_list[4], lora_scale_E=args[16],
+                lora_F=lora_list[5], lora_scale_F=args[18],
+                lora_G=lora_list[6], lora_scale_G=args[20],
             )
             print(lora_status)
 
-    sampler_name = args[17]
-    schedule_type_name = args[18]
+    sampler_name = args[21]
+    schedule_type_name = args[22]
     _, _, msg_sampler = check_scheduler_compatibility(
         sd_gen.model.class_name, sampler_name, schedule_type_name
     )
@@ -619,7 +659,7 @@ def sd_gen_generate_pipeline(*args):
             elif status is not None:
                 gr.Warning(f"Failed to load LoRA: {lora}")
 
-    if lora_status == [None] * 5 and sd_gen.model.lora_memory != [None] * 5 and load_lora_cpu:
+    if lora_status == [None] * sd_gen.model.num_loras and sd_gen.model.lora_memory != [None] * sd_gen.model.num_loras and load_lora_cpu:
         lora_cache_msg = ", ".join(
             str(x) for x in sd_gen.model.lora_memory if x is not None
         )
@@ -676,6 +716,7 @@ def esrgan_upscale(image, upscaler_name, upscaler_size):
     return image_path
 
 
+# https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
 dynamic_gpu_duration.zerogpu = True
 sd_gen_generate_pipeline.zerogpu = True
 sd_gen = GuiSD()
@@ -699,7 +740,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
                 task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
                 model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
                 prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
-                neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt")
+                neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt", value="lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, worst quality, low quality, very displeasing, (bad)")
                 with gr.Row(equal_height=False):
                     set_params_gui = gr.Button(value="↙️", variant="secondary", size="sm")
                     clear_prompt_gui = gr.Button(value="🗑️", variant="secondary", size="sm")
@@ -733,10 +774,10 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
                        gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
                        with gr.Column():
                            verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
-                            load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU (Save GPU time)")
+                            load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")
 
                with gr.Column(scale=1):
-                    steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
+                    steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=28, label="Steps")
                    cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
                    sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler")
                    schedule_type_gui = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
@@ -864,16 +905,19 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
                    hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
                    hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
                    hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
+                    hires_schedule_list = ["Use same schedule type"] + SCHEDULE_TYPE_OPTIONS
+                    hires_schedule_type_gui = gr.Dropdown(label="Hires Schedule type", choices=hires_schedule_list, value=hires_schedule_list[0])
+                    hires_guidance_scale_gui = gr.Slider(minimum=-1., maximum=30., step=0.5, value=-1., label="Hires CFG", info="If the value is -1, the main CFG will be used")
                    hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
                    hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
 
                with gr.Accordion("LoRA", open=False, visible=True):
 
-                    def lora_dropdown(label):
-                        return gr.Dropdown(label=label, choices=lora_model_list, value="None", allow_custom_value=True)
+                    def lora_dropdown(label, visible=True):
+                        return gr.Dropdown(label=label, choices=lora_model_list, value="None", allow_custom_value=True, visible=visible)
 
-                    def lora_scale_slider(label):
-                        return gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label=label)
+                    def lora_scale_slider(label, visible=True):
+                        return gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label=label, visible=visible)
 
                    lora1_gui = lora_dropdown("Lora1")
                    lora_scale_1_gui = lora_scale_slider("Lora Scale 1")
@@ -885,6 +929,10 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
                    lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
                    lora5_gui = lora_dropdown("Lora5")
                    lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
+                    lora6_gui = lora_dropdown("Lora6", visible=False)
+                    lora_scale_6_gui = lora_scale_slider("Lora Scale 6", visible=False)
+                    lora7_gui = lora_dropdown("Lora7", visible=False)
+                    lora_scale_7_gui = lora_scale_slider("Lora Scale 7", visible=False)
 
                    with gr.Accordion("From URL", open=False, visible=True):
                        text_lora = gr.Textbox(
@@ -893,13 +941,13 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
                            lines=1,
                            info="It has to be .safetensors files, and you can also download them from Hugging Face.",
                        )
-                        romanize_text = gr.Checkbox(value=False, label="Transliterate name")
+                        romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=False)
                        button_lora = gr.Button("Get and Refresh the LoRA Lists")
                        new_lora_status = gr.HTML()
                        button_lora.click(
                            get_my_lora,
                            [text_lora, romanize_text],
-                            [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, new_lora_status]
+                            [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, lora6_gui, lora7_gui, new_lora_status]
                        )
 
                with gr.Accordion("IP-Adapter", open=False, visible=True):
@@ -931,29 +979,32 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
                        minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
                        info="The maximum proportional size of the generated image based on the uploaded image."
                    )
-                    preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=PREPROCESSOR_CONTROLNET["canny"])
+                    controlnet_model_gui = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
+                    control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
+                    control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
+                    control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
+                    preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=TASK_AND_PREPROCESSORS["canny"])
 
                    def change_preprocessor_choices(task):
                        task = TASK_STABLEPY[task]
-                        if task in PREPROCESSOR_CONTROLNET.keys():
-                            choices_task = PREPROCESSOR_CONTROLNET[task]
+                        if task in TASK_AND_PREPROCESSORS.keys():
+                            choices_task = TASK_AND_PREPROCESSORS[task]
                        else:
-                            choices_task = PREPROCESSOR_CONTROLNET["canny"]
+                            choices_task = TASK_AND_PREPROCESSORS["canny"]
                        return gr.update(choices=choices_task, value=choices_task[0])
-
                    task_gui.change(
                        change_preprocessor_choices,
                        [task_gui],
                        [preprocessor_name_gui],
                    )
-                    preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocess Resolution")
-                    low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="Canny low threshold")
-                    high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="Canny high threshold")
-                    value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="Hough value threshold (MLSD)")
-                    distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="Hough distance threshold (MLSD)")
-                    control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
-                    control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
-                    control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
+
+                    preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
+                    low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
+                    high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
+                    value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+                    distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+                    recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
+                    tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
 
                    with gr.Accordion("T2I adapter", open=False, visible=False):
                        t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
@@ -1030,6 +1081,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
 
                with gr.Accordion("Other settings", open=False, visible=True):
                    schedule_prediction_type_gui = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
+                    guidance_rescale_gui = gr.Number(label="CFG rescale:", value=0., step=0.01, minimum=0., maximum=1.5)
                    save_generated_images_gui = gr.Checkbox(value=True, label="Create a download link for the images")
                    filename_pattern_gui = gr.Textbox(label="Filename pattern", value="model,seed", placeholder="model,seed,sampler,schedule_type,img_width,img_height,guidance_scale,num_steps,vae,prompt_section,neg_prompt_section", lines=1)
                    hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
@@ -1094,15 +1146,15 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
                    # enable crop (or disable it)
                    # transforms=["crop"],
                    brush=gr.Brush(
-                        default_size="16", # or leave it as 'auto'
-                        color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
-                        # default_color="black", # html names are supported
-                        colors=[
-                            "rgba(0, 0, 0, 1)", # rgb(a)
-                            "rgba(0, 0, 0, 0.1)",
-                            "rgba(255, 255, 255, 0.1)",
-                            # "hsl(360, 120, 120)" # in fact any valid colorstring
-                        ]
+                        default_size="16", # or leave it as 'auto'
+                        color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
+                        # default_color="black", # html names are supported
+                        colors=[
+                            "rgba(0, 0, 0, 1)", # rgb(a)
+                            "rgba(0, 0, 0, 0.1)",
+                            "rgba(255, 255, 255, 0.1)",
+                            # "hsl(360, 120, 120)" # in fact any valid colorstring
+                        ]
                    ),
                    eraser=gr.Eraser(default_size="16")
                )
@@ -1152,12 +1204,16 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
            outputs=[result_up_tab],
        )
 
+    with gr.Tab("Preprocessor", render=True):
+        preprocessor_tab()
+
    generate_button.click(
        fn=sd_gen.load_new_model,
        inputs=[
            model_name_gui,
            vae_model_gui,
-            task_gui
+            task_gui,
+            controlnet_model_gui,
        ],
        outputs=[load_model_gui],
        queue=True,
@@ -1182,6 +1238,10 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
            lora_scale_4_gui,
            lora5_gui,
            lora_scale_5_gui,
+            lora6_gui,
+            lora_scale_6_gui,
+            lora7_gui,
+            lora_scale_7_gui,
            sampler_gui,
            schedule_type_gui,
            schedule_prediction_type_gui,
@@ -1202,6 +1262,8 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
            high_threshold_gui,
            value_threshold_gui,
            distance_threshold_gui,
+            recolor_gamma_correction_gui,
+            tile_blur_sigma_gui,
            control_net_output_scaling_gui,
            control_net_start_threshold_gui,
            control_net_stop_threshold_gui,
@@ -1218,6 +1280,9 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
            hires_negative_prompt_gui,
            hires_before_adetailer_gui,
            hires_after_adetailer_gui,
+            hires_schedule_type_gui,
+            hires_guidance_scale_gui,
+            controlnet_model_gui,
            loop_generation_gui,
            leave_progress_bar_gui,
            disable_progress_bar_gui,
@@ -1259,6 +1324,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
            mask_blur_b_gui,
            mask_padding_b_gui,
            retain_task_cache_gui,
+            guidance_rescale_gui,
            image_ip1,
            mask_ip1,
            model_ip1,
@@ -1285,4 +1351,4 @@ app.launch(
    show_error=True,
    debug=True,
    allowed_paths=["./images/"],
-)
+)