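# Gradio demo: edge-guided image generation with ControlNet (Canny) + Stable Diffusion v1.5.
# The uploaded image is reduced to a Canny edge map, which conditions the diffusion model
# so the output follows the original's structure while the text prompt drives its content.
# Dependencies: gradio, torch, diffusers, transformers, accelerate, opencv-python, Pillow.
# Assumes a CUDA-capable GPU, since the model weights are loaded in float16.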
import gradio as gr
import torch
import numpy as np
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import make_image_grid
import cv2

# Load the Canny-conditioned ControlNet and attach it to a Stable Diffusion v1.5 pipeline.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16, use_safetensors=True
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True
)
# UniPC reaches good quality in fewer denoising steps than the pipeline's default scheduler.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# Keep model components on CPU until they are needed, cutting GPU memory use (requires accelerate).
pipe.enable_model_cpu_offload()

def generate_image(input_image, text_prompt):
    # Normalize the upload: force 3 channels (e.g. RGBA PNGs) and resize to SD v1.5's
    # native 512x512 so the pipeline output and the grid cells line up.
    input_image = input_image.convert("RGB").resize((512, 512))
    original_image = np.array(input_image)

    # Extract Canny edges; the two thresholds bound the hysteresis step.
    low_threshold = 100
    high_threshold = 200
    edges = cv2.Canny(original_image, low_threshold, high_threshold)

    # Stack the single-channel edge map into a 3-channel image, as ControlNet expects.
    edges = edges[:, :, None]
    canny_image = np.concatenate([edges, edges, edges], axis=2)
    canny_image_pil = Image.fromarray(canny_image)

    # Generate: the prompt drives content, the Canny map constrains structure.
    output_image = pipe(text_prompt, image=canny_image_pil).images[0]

    # Return input, edge map, and result side by side for comparison.
    result_grid = make_image_grid([input_image, canny_image_pil, output_image], rows=1, cols=3)
    return result_grid

# Build the UI: upload + prompt inputs, a generate button, and the result grid.
with gr.Blocks() as demo:
    gr.Markdown("# Image Transformation with ControlNet and Stable Diffusion")
    
    with gr.Row():
        with gr.Column():
            # The `tool` argument was removed in Gradio 4; a plain upload component suffices.
            input_image = gr.Image(type="pil", label="Upload Image")
            text_prompt = gr.Textbox(label="Enter a prompt for the transformation")

    generate_button = gr.Button("Generate Image")

    # `shape` was removed in Gradio 4; `height` only sets the preview's display size.
    result = gr.Image(label="Result", height=256)

    # Run generate_image on click and show the returned grid in the result image.
    generate_button.click(fn=generate_image, inputs=[input_image, text_prompt], outputs=result)

# Launch the local server; demo.launch(share=True) would also create a temporary public URL.
demo.launch()