import os
import requests
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import io
import time
from concurrent.futures import ThreadPoolExecutor
# ===== CONFIGURATION =====
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
WATERMARK_TEXT = "SelamGPT"
MAX_RETRIES = 3
TIMEOUT = 45  # request timeout in seconds; generous because 768x768 generations can be slow
EXECUTOR = ThreadPoolExecutor(max_workers=2)  # limit concurrent API requests
# ===== WATERMARK FUNCTION =====
def add_watermark(image_bytes):
"""Add watermark with smaller text and simplified positioning"""
try:
image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
draw = ImageDraw.Draw(image)
        # Watermark font size in pixels
        font_size = 24
        try:
            font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
        except OSError:
            # Font file not available; fall back to Pillow's bundled default
            # (passing a size here requires Pillow >= 10.1)
            font = ImageFont.load_default(font_size)
text = "SelamGPT"
margin = 10 # Reduced from 20
# Calculate position using textlength
text_width = draw.textlength(text, font=font)
x = image.width - text_width - margin
y = image.height - 30 # Fixed vertical position
# Simpler white text without transparency
draw.text((x, y), text, font=font, fill=(255, 255, 255))
return image
except Exception as e:
print(f"Watermark error: {str(e)}")
return Image.open(io.BytesIO(image_bytes))
# ===== IMAGE GENERATION =====
def generate_image(prompt):
"""Generate image with robust error handling"""
if not prompt.strip():
return None, "⚠️ Please enter a prompt"
def api_call():
return requests.post(
API_URL,
headers=headers,
json={
"inputs": prompt,
"parameters": {
"height": 768,
"width": 768,
"num_inference_steps": 25
},
"options": {"wait_for_model": True}
},
timeout=TIMEOUT
)
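    # Retry loop: a 503 from the Inference API means the model is still being
    # loaded onto a backend, so back off progressively and try again.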
for attempt in range(MAX_RETRIES):
try:
future = EXECUTOR.submit(api_call)
response = future.result()
if response.status_code == 200:
return add_watermark(response.content), "✔️ Generation successful"
elif response.status_code == 503:
wait_time = (attempt + 1) * 10
print(f"Model loading, waiting {wait_time}s...")
time.sleep(wait_time)
continue
else:
return None, f"⚠️ API Error: {response.text[:200]}"
except requests.Timeout:
return None, "⚠️ Timeout: Model took too long to respond"
except Exception as e:
return None, f"⚠️ Unexpected error: {str(e)[:200]}"
return None, "⚠️ Failed after multiple attempts. Please try again later."
# ===== GRADIO INTERFACE =====
theme = gr.themes.Default(
primary_hue="emerald",
secondary_hue="amber",
font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"]
)
with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
gr.Markdown("""
# 🎨 SelamGPT Image Generator
*Generate watermarked images with Stable Diffusion XL*
""")
with gr.Row():
with gr.Column(scale=3):
prompt_input = gr.Textbox(
label="Describe your image",
placeholder="A futuristic Ethiopian city with flying cars...",
lines=3,
max_lines=5,
elem_id="prompt-box"
)
with gr.Row():
generate_btn = gr.Button("Generate Image", variant="primary")
clear_btn = gr.Button("Clear")
gr.Examples(
examples=[
["An ancient Aksumite warrior in cyberpunk armor"],
["Traditional Ethiopian coffee ceremony in space"],
["Hyper-realistic portrait of a Habesha woman with neon tribal markings"]
],
inputs=prompt_input,
label="Example Prompts"
)
with gr.Column(scale=2):
output_image = gr.Image(
label="Generated Image",
height=512,
elem_id="output-image"
)
status_output = gr.Textbox(
label="Status",
interactive=False,
elem_id="status-box"
)
# Event handlers
generate_btn.click(
fn=generate_image,
inputs=prompt_input,
outputs=[output_image, status_output],
queue=True,
show_progress="minimal"
)
clear_btn.click(
fn=lambda: [None, ""],
outputs=[output_image, status_output]
)
# ===== DEPLOYMENT CONFIG =====
if __name__ == "__main__":
    demo.queue(max_size=2)  # cap the number of queued generation requests
demo.launch(
server_name="0.0.0.0",
server_port=7860
)