James Peter Perrfone Jefferies committed on
Commit a680adc · 1 Parent(s): 7c82057

Use input image for ControlNet

Files changed (1)
  app.py +22 -23
app.py CHANGED
@@ -4,39 +4,37 @@ import numpy as np
 import torch
 
 from diffusers import DiffusionPipeline, StableDiffusionControlNetPipeline
-from diffusers.utils import load_image
 from PIL import Image
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
-image = np.array(image)
-
 low_threshold = 100
 high_threshold = 200
 
-image = cv2.Canny(image, low_threshold, high_threshold)
-image = image[:, :, None]
-image = np.concatenate([image, image, image], axis=2)
-canny_image = Image.fromarray(image)
-
 def generate(
-    prompt, negative_prompt, num_inference_steps, width, height, guidance_scale, seed
+    prompt, negative_prompt, num_inference_steps, width, height, guidance_scale, seed, input_image
 ):
+    if input_image is None:
+        pipeline = DiffusionPipeline.from_pretrained("Lykon/DreamShaper")
+        pipeline = pipeline.to(device)
+        generator = torch.Generator(device=device).manual_seed(seed)
+
+        return pipeline(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        ).images[0]
+
+    image = cv2.Canny(input_image, low_threshold, high_threshold)
+    image = image[:, :, None]
+    image = np.concatenate([image, image, image], axis=2)
+    canny_image = Image.fromarray(image)
+
     return canny_image
-    # pipeline = DiffusionPipeline.from_pretrained("Lykon/DreamShaper")
-    # pipeline = pipeline.to(device)
-    # generator = torch.Generator(device=device).manual_seed(seed)
-
-    # return pipeline(
-    #     prompt=prompt,
-    #     negative_prompt=negative_prompt,
-    #     num_inference_steps=num_inference_steps,
-    #     width=width,
-    #     height=height,
-    #     guidance_scale=guidance_scale,
-    #     generator=generator,
-    # ).images[0]
 
 iface = gr.Interface(
     fn=generate,
@@ -54,6 +52,7 @@ iface = gr.Interface(
             step=1,
            randomize=True,
         ),
+        gr.Image(label="Input Image", source='upload', type="numpy")
     ],
     outputs="image",
 )
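
After this commit, generate() still only returns the Canny edge map; the imported StableDiffusionControlNetPipeline is never instantiated. Below is a minimal sketch of how the Canny map could condition an actual ControlNet generation, assuming the "lllyasviel/sd-controlnet-canny" ControlNet checkpoint and the "runwayml/stable-diffusion-v1-5" base model; neither checkpoint appears in this commit, and the prompt and seed are placeholders.

# Sketch only (not part of this commit): feed the Canny map to a ControlNet
# pipeline instead of returning it. The checkpoint names are assumptions.
import numpy as np
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet
).to(device)

# Stand-in for the canny_image built inside generate(): an H x W x 3
# uint8 edge map wrapped as a PIL image.
canny_image = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))

result = pipe(
    prompt="a portrait, highly detailed",  # would come from the Gradio prompt box
    image=canny_image,                     # ControlNet conditioning image
    num_inference_steps=20,
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]

Inside generate(), the existing prompt, negative_prompt, guidance_scale, and seed arguments, plus the canny_image derived from the uploaded input_image (delivered as a NumPy array because of type="numpy"), would slot directly into this call.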