import gradio as gr
import numpy as np
import random
from PIL import Image, ImageDraw, ImageFont
import io
from diffusers import DiffusionPipeline
import torch
# ===== CONFIG =====
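# Prefer the GPU with half precision; fall back to CPU with full precision.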
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32
model_repo_id = "stabilityai/sdxl-turbo"
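# Load the fp16 weight variant only when running on CUDA.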
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype, variant="fp16" if device == "cuda" else None)
pipe.to(device)
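# Upper bounds for the seed slider and output resolution, plus the watermark label.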
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
WATERMARK_TEXT = "SelamGPT"
# ===== WATERMARK FUNCTION =====
def add_watermark(image):
    """Stamp the SelamGPT watermark in the bottom-right corner."""
    draw = ImageDraw.Draw(image)
    font_size = 24
    try:
        font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
    except OSError:
        # Fall back to Pillow's built-in font if Roboto-Bold.ttf is not bundled
        font = ImageFont.load_default()
    text_width = draw.textlength(WATERMARK_TEXT, font=font)
    x = image.width - text_width - 10
    y = image.height - 34
    # Draw a dark offset copy first, then white text on top, for a drop-shadow effect
    draw.text((x + 1, y + 1), WATERMARK_TEXT, font=font, fill=(0, 0, 0, 128))
    draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))
    return image
# ===== IMAGE GENERATION FUNCTION =====
def generate(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    if not prompt.strip():
        return None, "⚠️ Please enter a prompt"
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Seed the torch RNG so the same seed reproduces the same image
    generator = torch.manual_seed(seed)
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]
    image = add_watermark(result)
    return image, seed
# ===== EXAMPLES =====
examples = [
    "A futuristic Ethiopian city with flying cars",
    "An ancient Aksumite queen in a high-tech palace, digital painting",
    "A cyberpunk Habesha coffee ceremony on Mars",
]
# ===== INTERFACE =====
theme = gr.themes.Default(
    primary_hue="cyan",
    secondary_hue="amber",
    font=[gr.themes.GoogleFont("Poppins"), "sans-serif"],
)
css = "#container { max-width: 800px; margin: 0 auto; }"
with gr.Blocks(css=css, theme=theme, title="SelamGPT Turbo Image Generator") as demo:
    with gr.Column(elem_id="container"):
        gr.Markdown("# 🎨 SelamGPT Turbo Image Generator\n*Powered by SDXL-Turbo (Fast & Creative)*")
        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt",
                show_label=False,
                placeholder="Describe the image...",
                lines=2,
                scale=3,
            )
            generate_btn = gr.Button("Generate", variant="primary")
        image_output = gr.Image(label="Generated Image", type="pil", format="png", height=512)
        seed_output = gr.Textbox(label="Seed Used", interactive=False)
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Things to avoid (optional)", max_lines=1)
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            seed = gr.Slider(0, MAX_SEED, label="Seed", step=1, value=0)
            with gr.Row():
                width = gr.Slider(256, MAX_IMAGE_SIZE, step=32, label="Width", value=1024)
                height = gr.Slider(256, MAX_IMAGE_SIZE, step=32, label="Height", value=1024)
            with gr.Row():
                # SDXL-Turbo is tuned for guidance_scale 0.0 and very few inference steps
                guidance_scale = gr.Slider(0.0, 10.0, step=0.1, label="Guidance Scale", value=0.0)
                num_inference_steps = gr.Slider(1, 50, step=1, label="Inference Steps", value=2)
        gr.Examples(examples=examples, inputs=[prompt])
        # Wire the button to the generation function; inputs follow generate()'s signature order
        generate_btn.click(
            fn=generate,
            inputs=[
                prompt,
                negative_prompt,
                seed,
                randomize_seed,
                width,
                height,
                guidance_scale,
                num_inference_steps,
            ],
            outputs=[image_output, seed_output],
        )
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)