Spaces: Running on Zero
File size: 3,947 Bytes
import torch
import gradio as gr
import spaces # Import spaces for ZeroGPU support
from functools import lru_cache
from diffusers import StableDiffusionXLPipeline  # Correct pipeline for text-to-image
# LoRA model path on Hugging Face Hub
color_book_lora_path = "artificialguybr/ColoringBookRedmond-V2"
color_book_trigger = ", ColoringBookAF, Coloring Book"
# Load model on CPU initially
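# Note: lru_cache(maxsize=1) keeps only one pipeline in memory, so toggling
# use_lora between calls evicts the cached pipeline and reloads SDXL from disk.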
@lru_cache(maxsize=1)
def load_pipeline(use_lora: bool):
"""Load Stable Diffusion XL pipeline and LoRA weights (if selected)."""
# β
Use StableDiffusionXLPipeline for text-to-image generation
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
use_safetensors=True
)
# Keep the model on CPU until GPU is requested
pipe.to("cpu")
# Load LoRA if selected
if use_lora:
pipe.load_lora_weights(color_book_lora_path)
return pipe
# Define styles
styles = {
    "Neonpunk": {
        "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibrant, stunningly beautiful, crisp, "
                  "detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic",
        "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured"
    },
    "Retro Cyberpunk": {
        "prompt": "retro cyberpunk, 80's inspired, synthwave, neon, vibrant, detailed, retro futurism",
        "negative_prompt": "modern, desaturated, black and white, realism, low contrast"
    },
    "Dark Fantasy": {
        "prompt": "Dark Fantasy Art, dark, moody, dark fantasy style",
        "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, bright, sunny"
    },
    "Double Exposure": {
        "prompt": "Double Exposure Style, double image ghost effect, image combination, double exposure style",
        "negative_prompt": "ugly, deformed, noisy, blurry, low contrast"
    },
    "None": {
        "prompt": "8K",
        "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured"
    }
}
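# Note: the "None" style is not a true no-op: generate_image still appends "8K"
# to the prompt and supplies this default negative prompt.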
@spaces.GPU # ZeroGPU: Allocate GPU only when generating images
def generate_image(prompt: str, style_name: str, use_lora: bool):
"""Generate an image using Stable Diffusion XL with optional LoRA fine-tuning."""
# Load the pipeline (cached)
pipeline = load_pipeline(use_lora)
# Move model to GPU only when needed
pipeline.to("cuda")
# Get the selected style details
style_prompt = styles.get(style_name, {}).get("prompt", "")
negative_prompt = styles.get(style_name, {}).get("negative_prompt", "")
# Apply LoRA trigger phrase if enabled
if use_lora:
prompt += color_book_trigger
# β
Ensure text-to-image pipeline is used correctly
image = pipeline(
prompt=prompt + " " + style_prompt,
negative_prompt="blurred, ugly, watermark, low resolution, " + negative_prompt,
num_inference_steps=20,
guidance_scale=9.0
).images[0]
# Move model back to CPU to free GPU resources
pipeline.to("cpu")
return image
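
# Example (hypothetical) direct call, bypassing the Gradio UI; the pipeline
# returns a PIL image, so it can be saved straight to disk:
#   img = generate_image("A cute lion", "None", use_lora=True)
#   img.save("lion_coloring_page.png")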
# Gradio Interface for Hugging Face Spaces (ZeroGPU-compatible)
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Enter Your Prompt", placeholder="A cute lion"),
        gr.Dropdown(label="Select a Style", choices=list(styles.keys()), value="None"),
        gr.Checkbox(label="Use Coloring Book LoRA", value=False)
    ],
    outputs=gr.Image(label="Generated Image"),
    title="🎨 AI Coloring Book & Style Generator",
    description="Generate AI-powered art using Stable Diffusion XL on Hugging Face Spaces. "
                "Choose a style or enable a LoRA fine-tuned coloring book effect."
)
# Run Gradio app for Hugging Face Spaces
if __name__ == "__main__":
    interface.launch()