korp123 sayakpaul HF Staff commited on
Commit
1dfb436
·
0 Parent(s):

Duplicate from diffusers/controlnet-openpose

Browse files

Co-authored-by: Sayak Paul <[email protected]>

Files changed (5) hide show
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +68 -0
  4. requirements.txt +7 -0
  5. yoga1.jpeg +0 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: ControlNet Openpose
3
+ emoji: 😻
4
+ colorFrom: green
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 3.19.1
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: diffusers/controlnet-openpose
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from controlnet_aux import OpenposeDetector
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import UniPCMultistepScheduler
import gradio as gr
import torch

# Canny edge-detection thresholds. NOTE(review): unused by the OpenPose
# pipeline below — presumably left over from a Canny variant of this Space.
low_threshold = 100
high_threshold = 200

# Pose estimator: turns an input photo into an OpenPose skeleton image.
pose_model = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

# ControlNet trained on OpenPose conditioning, paired with SD v1.5.
# safety_checker=None disables the NSFW filter; fp16 halves memory use.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Loads individual model components onto the GPU on demand, so an explicit
# pipe.to("cuda") is not needed.
pipe.enable_model_cpu_offload()

# Memory-efficient attention via xformers.
pipe.enable_xformers_memory_efficient_attention()

# Module-level generator seeded once; successive calls continue its state.
generator = torch.manual_seed(0)
+
31
+ def get_pose(image):
32
+ return pose_model(image)
33
+
34
+
35
+ def generate_images(image, prompt):
36
+ pose = get_pose(image)
37
+ output = pipe(
38
+ prompt,
39
+ pose,
40
+ generator=generator,
41
+ num_images_per_prompt=3,
42
+ num_inference_steps=20,
43
+ )
44
+ all_outputs = []
45
+ all_outputs.append(pose)
46
+ for image in output.images:
47
+ all_outputs.append(image)
48
+ return all_outputs
49
+
50
+
51
+ gr.Interface(
52
+ generate_images,
53
+ inputs=[
54
+ gr.Image(type="pil"),
55
+ gr.Textbox(
56
+ label="Enter your prompt",
57
+ max_lines=1,
58
+ placeholder="best quality, extremely detailed",
59
+ ),
60
+ ],
61
+ outputs=gr.Gallery().style(grid=[2], height="auto"),
62
+ title="Generate controlled outputs with ControlNet and Stable Diffusion. ",
63
+ description="This Space uses pose estimated lines as the additional conditioning.",
64
+ examples=[["yoga1.jpeg", "best quality, extremely detailed"]],
65
+ allow_flagging=False,
66
+ ).launch(enable_queue=True)
67
+
68
+
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ git+https://github.com/huggingface/diffusers.git
2
+ transformers
3
+ git+https://github.com/huggingface/accelerate
4
+ torch==1.13.1
5
+ numpy
6
+ controlnet_aux
7
+ xformers==0.0.16
yoga1.jpeg ADDED