John6666 committed on
Commit
27218a1
1 Parent(s): 30cd8dd

Upload dc.py

Browse files
Files changed (1) hide show
  1. dc.py +22 -51
dc.py CHANGED
@@ -40,27 +40,14 @@ from stablepy import logger
40
  logger.setLevel(logging.CRITICAL)
41
 
42
  from env import (
43
- hf_token,
44
- hf_read_token, # to use only for private repos
45
- CIVITAI_API_KEY,
46
- HF_LORA_PRIVATE_REPOS1,
47
- HF_LORA_PRIVATE_REPOS2,
48
- HF_LORA_ESSENTIAL_PRIVATE_REPO,
49
- HF_VAE_PRIVATE_REPO,
50
- HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO,
51
- HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
52
- directory_models,
53
- directory_loras,
54
- directory_vaes,
55
- directory_embeds,
56
- directory_embeds_sdxl,
57
- directory_embeds_positive_sdxl,
58
- load_diffusers_format_model,
59
- download_model_list,
60
- download_lora_list,
61
- download_vae_list,
62
- download_embeds,
63
- )
64
 
65
  preprocessor_controlnet = {
66
  "openpose": [
@@ -198,16 +185,8 @@ def process_string(input_string):
198
  return None
199
 
200
  ## BEGIN MOD
201
- from modutils import (
202
- to_list,
203
- list_uniq,
204
- list_sub,
205
- get_model_id_list,
206
- get_tupled_embed_list,
207
- get_tupled_model_list,
208
- get_lora_model_list,
209
- download_private_repo,
210
- )
211
 
212
  # - **Download Models**
213
  download_model = ", ".join(download_model_list)
@@ -335,6 +314,7 @@ class GuiSD:
335
  vae_model=None,
336
  type_model_precision=torch.float16,
337
  retain_task_model_in_cache=False,
 
338
  )
339
 
340
  def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
@@ -362,6 +342,7 @@ class GuiSD:
362
  if incompatible_vae:
363
  vae_model = None
364
 
 
365
 
366
  self.model.load_pipe(
367
  model_name,
@@ -539,6 +520,7 @@ class GuiSD:
539
  params_ip_scale.append(scaleip)
540
 
541
  # First load
 
542
  model_precision = torch.float16
543
  if not self.model:
544
  from stablepy import Model_Diffusers
@@ -706,29 +688,18 @@ class GuiSD:
706
  info_state = info_state + "<br>" + vae_msg
707
  if msg_lora:
708
  info_state = info_state + "<br>" + "<br>".join(msg_lora)
709
- return self.infer_short(self.model, pipe_params), info_state
710
  ## END MOD
711
 
712
 
713
  from pathlib import Path
714
- from modutils import (
715
- safe_float,
716
- escape_lora_basename,
717
- to_lora_key,
718
- to_lora_path,
719
- get_local_model_list,
720
- get_private_lora_model_lists,
721
- get_valid_lora_name,
722
- get_valid_lora_path,
723
- get_valid_lora_wt,
724
- get_lora_info,
725
- normalize_prompt_list,
726
- get_civitai_info,
727
- search_lora_on_civitai,
728
- )
729
 
730
  sd_gen = GuiSD()
731
- @spaces.GPU
732
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
733
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
734
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
@@ -757,7 +728,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
757
  lora5 = get_valid_lora_path(lora5)
758
  progress(1, desc="Preparation completed. Starting inference preparation...")
759
 
760
- sd_gen.load_new_model(model_name, vae, task_model_list[0])
761
  images, info = sd_gen.generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
762
  guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
763
  lora4, lora4_wt, lora5, lora5_wt, sampler,
@@ -767,7 +738,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
767
  False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
768
  False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
769
  False, "", "", 0.35, True, True, False, 4, 4, 32,
770
- True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7
771
  )
772
 
773
  progress(1, desc="Inference completed.")
@@ -776,7 +747,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
776
  return output_image
777
 
778
 
779
- @spaces.GPU
780
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
781
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
782
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
 
40
  logger.setLevel(logging.CRITICAL)
41
 
42
  from env import (
43
+ hf_token, hf_read_token, # to use only for private repos
44
+ CIVITAI_API_KEY, HF_LORA_PRIVATE_REPOS1, HF_LORA_PRIVATE_REPOS2,
45
+ HF_LORA_ESSENTIAL_PRIVATE_REPO, HF_VAE_PRIVATE_REPO,
46
+ HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
47
+ directory_models, directory_loras, directory_vaes, directory_embeds,
48
+ directory_embeds_sdxl, directory_embeds_positive_sdxl,
49
+ load_diffusers_format_model, download_model_list, download_lora_list,
50
+ download_vae_list, download_embeds)
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  preprocessor_controlnet = {
53
  "openpose": [
 
185
  return None
186
 
187
  ## BEGIN MOD
188
+ from modutils import (to_list, list_uniq, list_sub, get_model_id_list, get_tupled_embed_list,
189
+ get_tupled_model_list, get_lora_model_list, download_private_repo)
 
 
 
 
 
 
 
 
190
 
191
  # - **Download Models**
192
  download_model = ", ".join(download_model_list)
 
314
  vae_model=None,
315
  type_model_precision=torch.float16,
316
  retain_task_model_in_cache=False,
317
+ device="cpu",
318
  )
319
 
320
  def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
 
342
  if incompatible_vae:
343
  vae_model = None
344
 
345
+ self.model.device = torch.device("cpu")
346
 
347
  self.model.load_pipe(
348
  model_name,
 
520
  params_ip_scale.append(scaleip)
521
 
522
  # First load
523
+ self.model.device = torch.device("cuda:0")
524
  model_precision = torch.float16
525
  if not self.model:
526
  from stablepy import Model_Diffusers
 
688
  info_state = info_state + "<br>" + vae_msg
689
  if msg_lora:
690
  info_state = info_state + "<br>" + "<br>".join(msg_lora)
691
+ return self.infer_short(self.model, pipe_params, progress), info_state
692
  ## END MOD
693
 
694
 
695
  from pathlib import Path
696
+ from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
697
+ get_local_model_list, get_private_lora_model_lists, get_valid_lora_name,
698
+ get_valid_lora_path, get_valid_lora_wt, get_lora_info,
699
+ normalize_prompt_list, get_civitai_info, search_lora_on_civitai)
 
 
 
 
 
 
 
 
 
 
 
700
 
701
  sd_gen = GuiSD()
702
+ #@spaces.GPU
703
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
704
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
705
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
 
728
  lora5 = get_valid_lora_path(lora5)
729
  progress(1, desc="Preparation completed. Starting inference preparation...")
730
 
731
+ sd_gen.load_new_model(model_name, vae, task_model_list[0], progress)
732
  images, info = sd_gen.generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
733
  guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
734
  lora4, lora4_wt, lora5, lora5_wt, sampler,
 
738
  False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
739
  False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
740
  False, "", "", 0.35, True, True, False, 4, 4, 32,
741
+ True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, progress
742
  )
743
 
744
  progress(1, desc="Inference completed.")
 
747
  return output_image
748
 
749
 
750
+ #@spaces.GPU
751
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
752
  model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
753
  lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,