import os

import gradio as gr
import spaces
import torch
from diffusers import AutoPipelineForText2Image, DDIMScheduler
from huggingface_hub import login
from PIL import Image
from transformers import CLIPVisionModelWithProjection

# Authenticate with the Hugging Face Hub; HF_TOKEN is expected to be set
# (e.g. as a Space secret) so model weights can be downloaded.
token = os.getenv("HF_TOKEN")
login(token=token)

# CLIP vision encoder required by the IP-Adapter "plus" (ViT-H) variants.
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "h94/IP-Adapter",
    subfolder="models/image_encoder",
    torch_dtype=torch.float16,
)

# SDXL text-to-image pipeline with the custom image encoder attached.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    image_encoder=image_encoder,
)
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)

# Load two IP-Adapters: one for overall style, one specialised for faces.
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name=[
        "ip-adapter-plus_sdxl_vit-h.safetensors",
        "ip-adapter-plus-face_sdxl_vit-h.safetensors",
    ],
)
# Default per-adapter scales; overridden by the slider value in generate_image.
pipeline.set_ip_adapter_scale([0.7, 0.3])
pipeline.enable_model_cpu_offload()


@spaces.GPU
def generate_image(prompt, reference_image, ip_adapter_scale):
    reference_image = Image.open(reference_image)
    # Two IP-Adapters are loaded (style and face), so both the scale list and
    # the image list must have one entry per adapter.
    pipeline.set_ip_adapter_scale([ip_adapter_scale, ip_adapter_scale])

    image = pipeline(
        prompt=prompt,
        ip_adapter_image=[reference_image, reference_image],
        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
        num_inference_steps=50,
        num_images_per_prompt=1,
    ).images[0]

    return image
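
# A minimal usage sketch for testing the function outside the Gradio UI; the
# paths "reference.png" and "output.png" are hypothetical placeholders.
# result = generate_image("a watercolor portrait of a cat", "reference.png", 0.6)
# result.save("output.png")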

# Set up the Gradio interface
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Image(type="filepath", label="Reference Image (Style)"),
        gr.Slider(label="IP-Adapter Scale", minimum=0, maximum=1.0, step=0.1, value=0.6),
    ],
    outputs="image",
    title="Image Generation with Stable Diffusion XL and IP-Adapter",
    description="Generates an image from a text prompt and a reference image using Stable Diffusion XL with IP-Adapter.",
)

interface.launch()