James Peter Perrfone Jefferies committed on
Commit
955633a
·
1 Parent(s): a680adc

Make ControlNet work

Browse files
Files changed (1) hide show
  1. app.py +17 -7
app.py CHANGED
@@ -3,21 +3,19 @@ import gradio as gr
3
  import numpy as np
4
  import torch
5
 
6
- from diffusers import DiffusionPipeline, StableDiffusionControlNetPipeline
7
  from PIL import Image
8
 
9
- device = "cuda" if torch.cuda.is_available() else "cpu"
10
-
11
  low_threshold = 100
12
  high_threshold = 200
13
 
14
  def generate(
15
  prompt, negative_prompt, num_inference_steps, width, height, guidance_scale, seed, input_image
16
  ):
 
 
17
  if input_image is None:
18
- pipeline = DiffusionPipeline.from_pretrained("Lykon/DreamShaper")
19
- pipeline = pipeline.to(device)
20
- generator = torch.Generator(device=device).manual_seed(seed)
21
 
22
  return pipeline(
23
  prompt=prompt,
@@ -34,7 +32,19 @@ def generate(
34
  image = np.concatenate([image, image, image], axis=2)
35
  canny_image = Image.fromarray(image)
36
 
37
- return canny_image
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  iface = gr.Interface(
40
  fn=generate,
 
3
  import numpy as np
4
  import torch
5
 
6
+ from diffusers import ControlNetModel, DiffusionPipeline, StableDiffusionControlNetPipeline
7
  from PIL import Image
8
 
 
 
9
  low_threshold = 100
10
  high_threshold = 200
11
 
12
  def generate(
13
  prompt, negative_prompt, num_inference_steps, width, height, guidance_scale, seed, input_image
14
  ):
15
+ generator = torch.manual_seed(seed)
16
+
17
  if input_image is None:
18
+ pipeline = DiffusionPipeline.from_pretrained("Lykon/DreamShaper", torch_dtype=torch.float16)
 
 
19
 
20
  return pipeline(
21
  prompt=prompt,
 
32
  image = np.concatenate([image, image, image], axis=2)
33
  canny_image = Image.fromarray(image)
34
 
35
+ controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
36
+ pipeline = StableDiffusionControlNetPipeline.from_pretrained("Lykon/DreamShaper", controlnet=controlnet, torch_dtype=torch.float16)
37
+
38
+ return pipeline(
39
+ prompt=prompt,
40
+ negative_prompt=negative_prompt,
41
+ num_inference_steps=num_inference_steps,
42
+ width=width,
43
+ height=height,
44
+ guidance_scale=guidance_scale,
45
+ generator=generator,
46
+ image=canny_image,
47
+ ).images[0]
48
 
49
  iface = gr.Interface(
50
  fn=generate,