takuma104 committed
Commit 79ccfb0 · 1 Parent(s): f530dd2

update example
app.py CHANGED
@@ -3,10 +3,6 @@ from diffusers import UniPCMultistepScheduler
 import gradio as gr
 import torch
 
-# Constants
-low_threshold = 100
-high_threshold = 200
-
 # Models
 controlnet_pose = ControlNetModel.from_pretrained(
     "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
@@ -16,21 +12,22 @@ controlnet_canny = ControlNetModel.from_pretrained(
 )
 
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
-    controlnet=[controlnet_pose,controlnet_canny],
+    "runwayml/stable-diffusion-v1-5",
+    controlnet=[controlnet_pose, controlnet_canny],
     safety_checker=None, torch_dtype=torch.float16
-)
+).to('cuda')
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 
 # This command loads the individual model components on GPU on-demand. So, we don't
 # need to explicitly call pipe.to("cuda").
-pipe.enable_model_cpu_offload()
+#pipe.enable_model_cpu_offload()
 
 # xformers
 pipe.enable_xformers_memory_efficient_attention()
 
 # Generator seed,
-generator = torch.manual_seed(0)
+generator = torch.manual_seed(3)
+
 
 def generate_images(pose_image, canny_image, prompt):
     output = pipe(
@@ -56,12 +53,17 @@ gr.Interface(
         gr.Textbox(
             label="Enter your prompt",
             max_lines=1,
-            placeholder="best quality, extremely detailed, a girl wearing white dress",
+            placeholder="masterpiece, a professional portrait of woman wearing white shirts",
         ),
     ],
     outputs=gr.Gallery().style(grid=[2], height="auto"),
     title="Generate controlled outputs with Mult-ControlNet and Stable Diffusion using 🤗Diffusers",
     description="This Space uses pose lines and canny edged image as the additional conditioning. Please refer to the \"Examples\" for what kind of images are appropriate.",
-    examples=[["sample_pose_body.png", "sample_canny_hand.png", "best quality, extremely detailed, a girl wearing white dress"]],
+    examples=[
+        ["p2_clip.png",
+         "c2_clip.png",
+         "masterpiece, a professional portrait of woman wearing white shirts"
+        ],
+    ],
     allow_flagging=False,
-).launch(enable_queue=True)
+).launch(enable_queue=True)
sample_canny_hand.png → c2_clip.png RENAMED
File without changes
sample_pose_body.png → p2_clip.png RENAMED
File without changes
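For reference, the net effect of this commit is that the pipeline is moved to CUDA directly instead of relying on model CPU offload, the fixed seed changes from 0 to 3, and the placeholder prompt plus bundled example images are swapped for the renamed p2_clip.png / c2_clip.png. Below is a minimal sketch of how the resulting Multi-ControlNet setup is typically driven end to end. The canny checkpoint id, the image loading, and num_inference_steps are illustrative assumptions; the body of generate_images is not shown in this diff.

# Sketch only: reconstructs the pipeline configured in app.py after this commit
# and shows one plausible generation call. Values marked "assumed" are not taken
# from the diff.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image

controlnet_pose = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
)
controlnet_canny = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16  # assumed canny checkpoint
)

pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=[controlnet_pose, controlnet_canny],  # Multi-ControlNet: one model per condition
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()

generator = torch.manual_seed(3)

pose_image = load_image("p2_clip.png")    # OpenPose skeleton conditioning image
canny_image = load_image("c2_clip.png")   # Canny edge conditioning image

# With a list of ControlNets, `image` takes one conditioning image per model,
# matched by position to the `controlnet` list above.
output = pipe(
    "masterpiece, a professional portrait of woman wearing white shirts",
    image=[pose_image, canny_image],
    num_inference_steps=20,  # assumed value
    generator=generator,
)
output.images[0].save("result.png")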