Krebzonide committed
Commit 54ad393 · 1 Parent(s): dd7a042

Removed control net because something didn’t work

Files changed (1): app.py (+7, -38)
app.py CHANGED
@@ -1,45 +1,15 @@
-from diffusers import AutoencoderKL, StableDiffusionXLControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
+from diffusers import StableDiffusionXLPipeline
 import torch
-from controlnet_aux import OpenposeDetector
-from diffusers.utils import load_image
+#from controlnet_aux import OpenposeDetector
+#from diffusers.utils import load_image
 import gradio as gr
 
-#sd1.5 bases
-#model_base = "SG161222/Realistic_Vision_V5.1_noVAE" #fantasy people
-#model_base = "Justin-Choo/epiCRealism-Natural_Sin_RC1_VAE" #cartoon people
-#model_base = "Lykon/DreamShaper" #unrealistic people
-#model_base = "runwayml/stable-diffusion-v1-5" #base
-#model_base = "Krebzonide/LazyMixPlus" #nsfw people
-#model_base = "Krebzonide/Humans" #boring people
-#model_base = "aufahr/unofficial_aom3" #anime people
-
-#lora_model_path = "Krebzonide/LoRA-CH-0" #mecjh - Corey H, trained on epiCRealism
-#lora_model_path = "Krebzonide/LoRA-CH-1" #mecjh - Corey H, trained on epiCRealism
-#lora_model_path = "Krebzonide/LoRA-EM1" #exgfem - Emily M, trained on LazyMixPlus
-#lora_model_path = "Krebzonide/LoRA-EM-2-0" #exgfem - Emily M, trained on Humans
-#lora_model_path = "Krebzonide/LoRA-YX1" #uwspyx - Professor Xing, trained on Realistic_Vision
-
-#pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True, use_auth_token="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")
-#pipe.unet.load_attn_procs(lora_model_path, use_auth_token="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")
-#pipe.to("cuda")
-
-
 model_base = "stabilityai/stable-diffusion-xl-base-1.0"
 
-openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
-
-#image is a random guy. openpose_image is the pose of that guy.
-image = load_image(
-    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"
-)
-openpose_image = openpose(image)
-
-controlnet = ControlNetModel.from_pretrained("thibaud/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16)
-
-pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
-    model_base, controlnet=controlnet, torch_dtype=torch.float16
+pipe = StableDiffusionXLPipeline.from_pretrained(
+    model_base, torch_dtype=torch.float16
 )
-pipe.enable_model_cpu_offload()
+pipe = pipe.to("cuda")
 
 css = """
 .btn-green {
@@ -61,7 +31,6 @@ def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale, progress=g
         #cross_attention_kwargs={"scale": lora_scale},
         num_images_per_prompt=4,
         #generator=torch.manual_seed(97),
-        image=openpose_image.resize((1024, 1024)), #THIS IS THE OPENPOSE IMAGE
     ).images
     return [(img, f"Image {i+1}") for i, img in enumerate(images)]
 
@@ -71,7 +40,7 @@ with gr.Blocks(css=css) as demo:
     prompt = gr.Textbox(label="Prompt")
     negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
    submit_btn = gr.Button("Generate", elem_classes="btn-green")
-    gallery = gr.Gallery(label="Generated images", height=700)
+    gallery = gr.Gallery(label="Generated images", height=1100)
     with gr.Row():
         samp_steps = gr.Slider(1, 100, value=25, step=1, label="Sampling steps")
         guide_scale = gr.Slider(1, 10, value=6, step=0.5, label="Guidance scale")
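For reference, below is a minimal sketch that reassembles the removed OpenPose ControlNet path from the deleted lines above, in case it is restored later. The checkpoint names, pipeline classes, and 1024x1024 pose resize all come from the old code; the example prompt and output filename are made up for illustration, and the sketch is untested (per the commit message, this setup did not work as-is).

# Sketch of the removed ControlNet path, reassembled from the deleted lines above.
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
from diffusers.utils import load_image
from controlnet_aux import OpenposeDetector
import torch

# Extract an OpenPose skeleton from a reference photo of a person.
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"
)
openpose_image = openpose(image)

# SDXL base conditioned on that pose via the OpenPose ControlNet.
controlnet = ControlNetModel.from_pretrained(
    "thibaud/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()  # stream weights to the GPU on demand

images = pipe(
    "a person dancing in a park",  # example prompt, not from the app
    num_images_per_prompt=1,  # the app generated 4 per prompt
    image=openpose_image.resize((1024, 1024)),  # pose conditioning image
).images
images[0].save("pose_test.png")  # illustrative output path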