James Peter Perrfone Jefferies committed on
Commit
7c82057
·
1 Parent(s): 798391a

First ControlNet attempt

Browse files
Files changed (2) hide show
  1. app.py +31 -14
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,25 +1,42 @@
 
1
  import gradio as gr
 
2
  import torch
3
- from diffusers import DiffusionPipeline
 
 
 
4
 
5
  device = "cuda" if torch.cuda.is_available() else "cpu"
6
 
 
 
 
 
 
 
 
 
 
 
 
7
  def generate(
8
  prompt, negative_prompt, num_inference_steps, width, height, guidance_scale, seed
9
  ):
10
- pipeline = DiffusionPipeline.from_pretrained("Lykon/DreamShaper")
11
- pipeline = pipeline.to(device)
12
- generator = torch.Generator(device=device).manual_seed(seed)
13
-
14
- return pipeline(
15
- prompt=prompt,
16
- negative_prompt=negative_prompt,
17
- num_inference_steps=num_inference_steps,
18
- width=width,
19
- height=height,
20
- guidance_scale=guidance_scale,
21
- generator=generator,
22
- ).images[0]
 
23
 
24
  iface = gr.Interface(
25
  fn=generate,
 
1
+ import cv2
2
  import gradio as gr
3
+ import numpy as np
4
  import torch
5
+
6
+ from diffusers import DiffusionPipeline, StableDiffusionControlNetPipeline
7
+ from diffusers.utils import load_image
8
+ from PIL import Image
9
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
# Build the ControlNet conditioning image once at import time: fetch a
# reference photo, run Canny edge detection on it, and expand the
# single-channel edge map to a 3-channel PIL image.
# NOTE(review): this performs a network download on import — consider
# deferring it into a function if startup time matters.
image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
# Replicate the 2-D edge map into 3 identical channels (H, W) -> (H, W, 3).
image = np.repeat(image[:, :, None], 3, axis=2)
canny_image = Image.fromarray(image)
22
+
23
def generate(
    prompt, negative_prompt, num_inference_steps, width, height, guidance_scale, seed
):
    """Stub generator: currently returns the precomputed Canny edge image.

    NOTE(review): every parameter is ignored for now — the DreamShaper
    pipeline call below is disabled while the ControlNet preprocessing
    step is being tested.
    """
    return canny_image
    # Disabled pipeline call, kept for re-enabling later:
    # pipeline = DiffusionPipeline.from_pretrained("Lykon/DreamShaper")
    # pipeline = pipeline.to(device)
    # generator = torch.Generator(device=device).manual_seed(seed)
    #
    # return pipeline(
    #     prompt=prompt,
    #     negative_prompt=negative_prompt,
    #     num_inference_steps=num_inference_steps,
    #     width=width,
    #     height=height,
    #     guidance_scale=guidance_scale,
    #     generator=generator,
    # ).images[0]
40
 
41
  iface = gr.Interface(
42
  fn=generate,
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  accelerate
2
  diffusers
 
3
  torch
4
  transformers
 
1
  accelerate
2
  diffusers
3
+ opencv-contrib-python
4
  torch
5
  transformers