import os

import gradio as gr
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from huggingface_hub import login

# Log in with the HF_TOKEN secret when running in a Hugging Face Space.
token = os.getenv("HF_TOKEN")
if token:
    login(token=token)

# Load the base Stable Diffusion model and a ControlNet checkpoint.
# Note: ControlNet checkpoints are tied to their base model family. The
# "control_v11p_sd15_*" checkpoints only work with Stable Diffusion 1.5,
# so an SD 1.5 base is used here instead of Stable Diffusion 3.5 (which
# needs the separate SD3 ControlNet pipeline and its own checkpoints).
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
controlnet_id = "lllyasviel/control_v11p_sd15_inpaint"

# Use the GPU (and half precision) when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

controlnet = ControlNetModel.from_pretrained(controlnet_id, torch_dtype=dtype)
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
    model_id, controlnet=controlnet, torch_dtype=dtype
).to(device)


# Define the Gradio interface function.
def generate_image(prompt, reference_image):
    # Resize the reference image to the resolution the model was trained on.
    reference_image = reference_image.resize((512, 512))

    # Generate an image conditioned on the prompt and the reference image.
    generated_image = pipeline(
        prompt=prompt,
        image=reference_image,
        controlnet_conditioning_scale=1.0,
        guidance_scale=7.5,
        num_inference_steps=50,
    ).images[0]
    return generated_image


# Set up the Gradio interface.
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Image(type="pil", label="Reference Image (Style)"),
    ],
    outputs="image",
    title="Image Generation with Reference Image Conditioning",
    description=(
        "Generate an image from a text prompt and a reference image using "
        "Stable Diffusion with ControlNet conditioning."
    ),
)

# Launch the Gradio interface.
interface.launch()
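
# --- Optional: true "reference-only" style transfer ---------------------------
# The ControlNet above conditions generation on the reference image; the actual
# "reference-only" attention trick lives in the diffusers community pipeline
# "stable_diffusion_reference". A minimal commented sketch follows, assuming
# that community pipeline and its ref_image / reference_attn / reference_adain
# arguments; verify against the installed diffusers version before using it.
#
# from diffusers import DiffusionPipeline
#
# ref_pipe = DiffusionPipeline.from_pretrained(
#     "stable-diffusion-v1-5/stable-diffusion-v1-5",
#     custom_pipeline="stable_diffusion_reference",
#     torch_dtype=dtype,
# ).to(device)
# styled = ref_pipe(
#     prompt="a castle at sunset",
#     ref_image=reference_image,   # PIL image supplying the style
#     reference_attn=True,
#     reference_adain=True,
#     num_inference_steps=50,
# ).images[0]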