Krebzonide committed
Commit 1291143 · 1 Parent(s): 24d8b2e

Upgrading from SD 1.5 to SDXL

Files changed (1):
  1. app.py +31 -8
app.py CHANGED
@@ -1,24 +1,45 @@
+from diffusers import AutoencoderKL, StableDiffusionXLControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
 import torch
-from diffusers import StableDiffusionPipeline
+from controlnet_aux import OpenposeDetector
+from diffusers.utils import load_image
 import gradio as gr
 
+#sd1.5 bases
 #model_base = "SG161222/Realistic_Vision_V5.1_noVAE" #fantasy people
 #model_base = "Justin-Choo/epiCRealism-Natural_Sin_RC1_VAE" #cartoon people
 #model_base = "Lykon/DreamShaper" #unrealistic people
 #model_base = "runwayml/stable-diffusion-v1-5" #base
-model_base = "Krebzonide/LazyMixPlus" #nsfw people
+#model_base = "Krebzonide/LazyMixPlus" #nsfw people
 #model_base = "Krebzonide/Humans" #boring people
 #model_base = "aufahr/unofficial_aom3" #anime people
 
 #lora_model_path = "Krebzonide/LoRA-CH-0" #mecjh - Corey H, traind on epiCRealism
 #lora_model_path = "Krebzonide/LoRA-CH-1" #mecjh - Corey H, traind on epiCRealism
-lora_model_path = "Krebzonide/LoRA-EM1" #exgfem - Emily M, trained on LizyMixPlus
+#lora_model_path = "Krebzonide/LoRA-EM1" #exgfem - Emily M, trained on LizyMixPlus
 #lora_model_path = "Krebzonide/LoRA-EM-2-0" #exgfem - Emily M, trained on Humans
 #lora_model_path = "Krebzonide/LoRA-YX1" #uwspyx - Professor Xing, trained on Realistic_Vision
 
-pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True, use_auth_token="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")
-pipe.unet.load_attn_procs(lora_model_path, use_auth_token="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")
-pipe.to("cuda")
+#pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True, use_auth_token="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")
+#pipe.unet.load_attn_procs(lora_model_path, use_auth_token="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")
+#pipe.to("cuda")
+
+
+model_base = "stabilityai/stable-diffusion-xl-base-1.0"
+
+openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+
+#image is a random guy. openpose_image is the pose of that guy.
+image = load_image(
+    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"
+)
+openpose_image = openpose(image)
+
+controlnet = ControlNetModel.from_pretrained("thibaud/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16)
+
+pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+    model_base, controlnet=controlnet, torch_dtype=torch.float16
+)
+pipe.enable_model_cpu_offload()
 
 css = """
 .btn-green {
@@ -37,8 +58,10 @@ def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale, progress=g
         negative_prompt=neg_prompt,
         num_inference_steps=samp_steps,
         guidance_scale=guide_scale,
-        cross_attention_kwargs={"scale": lora_scale},
-        num_images_per_prompt=6
+        #cross_attention_kwargs={"scale": lora_scale},
+        num_images_per_prompt=4,
+        #generator=torch.manual_seed(97),
+        image=openpose_image.resize((1024, 1024)), #THIS IS THE OPENPOSE IMAGE
     ).images
     return [(img, f"Image {i+1}") for i, img in enumerate(images)]
 
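
The second hunk only shows the changed keyword arguments, so the full generation call is not visible in the diff. Below is a minimal sketch of how generate() plausibly reads after this commit, assuming the module-level pipe, openpose_image, and gr objects built in the first hunk are in scope. The opening pipe( line, the positional prompt argument, and the gr.Progress() default (the hunk header truncates at "progress=g") are reconstructions; only the arguments shown in the diff's context and added lines are verbatim.

# Hypothetical reconstruction of generate() after this commit; the surrounding
# unchanged lines are assumed, only the diffed arguments are taken as-is.
def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale, progress=gr.Progress()):
    images = pipe(
        prompt,                                         # assumed: prompt forwarded from the Gradio textbox
        negative_prompt=neg_prompt,
        num_inference_steps=samp_steps,
        guidance_scale=guide_scale,
        #cross_attention_kwargs={"scale": lora_scale},  # disabled along with the SD 1.5 LoRA
        num_images_per_prompt=4,
        #generator=torch.manual_seed(97),               # left commented out in the commit
        image=openpose_image.resize((1024, 1024)),      # OpenPose conditioning image for the SDXL ControlNet
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]

Note that the commit also swaps pipe.to("cuda") for pipe.enable_model_cpu_offload(), which keeps the larger SDXL + ControlNet weights off the GPU until each sub-module is needed, trading some speed for lower VRAM use.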