# app.py — SelamGPT image generator (author: snackshell, commit 4cda5f1)
# NOTE(review): the original paste began with Hugging Face web-page chrome
# ("raw", "history blame", "5.03 kB") captured from the raw-file view; it is
# preserved here as a comment so the file parses as Python.
import os
import requests
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import io
import time
from concurrent.futures import ThreadPoolExecutor
# ===== CONFIGURATION =====
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")  # HF token from env; None if unset (API calls will then be unauthorized)
MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"  # serverless Inference API endpoint
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}  # sent with every inference request
WATERMARK_TEXT = "SelamGPT"  # stamped onto the corner of every generated image
MAX_RETRIES = 3  # attempts when the API answers 503 (model cold-loading)
TIMEOUT = 45 # Reduced timeout for faster failover
EXECUTOR = ThreadPoolExecutor(max_workers=3) # Increased workers
# ===== OPTIMIZED WATERMARK FUNCTION (WebP) =====
def add_watermark(image_bytes):
    """Stamp WATERMARK_TEXT into the bottom-right corner and re-encode as WebP.

    Args:
        image_bytes: Raw encoded image bytes (any format PIL can open).

    Returns:
        A PIL.Image decoded from the watermarked WebP (quality 85), or the
        original image unmodified if anything goes wrong — deliberately
        best-effort so a watermark problem never loses a generated image.
    """
    try:
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        # Draw in "RGBA" mode so the semi-transparent shadow alpha actually
        # blends; on a plain RGB draw the 128 alpha below would have no effect.
        draw = ImageDraw.Draw(image, "RGBA")
        font_size = 24
        try:
            font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
        except OSError:
            # Bundled font missing/unreadable — fall back to PIL's default.
            # The sized variant requires Pillow >= 10.1; older Pillow raises
            # TypeError, so fall back again to the unsized default.
            try:
                font = ImageFont.load_default(font_size)
            except TypeError:
                font = ImageFont.load_default()
        text_width = draw.textlength(WATERMARK_TEXT, font=font)
        x = image.width - text_width - 10
        y = image.height - 34
        # Dark 1px-offset shadow under white text keeps it readable on any background.
        draw.text((x + 1, y + 1), WATERMARK_TEXT, font=font, fill=(0, 0, 0, 128))
        draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))
        # Re-encode as WebP: smaller payload, faster delivery to the browser.
        webp_buffer = io.BytesIO()
        image.save(webp_buffer, format="WEBP", quality=85, method=6)
        webp_buffer.seek(0)
        return Image.open(webp_buffer)
    except Exception as e:  # best-effort: never let watermarking kill a result
        print(f"Watermark error: {str(e)}")
        return Image.open(io.BytesIO(image_bytes))
# ===== FASTER IMAGE GENERATION =====
def generate_image(prompt):
    """Generate an image from *prompt* via the HF serverless Inference API.

    Args:
        prompt: Free-text description of the desired image.

    Returns:
        (PIL.Image | None, str): the watermarked image (or None on failure)
        and a human-readable status message for the UI.
    """
    if not prompt.strip():
        return None, "⚠️ Please enter a prompt"

    payload = {
        "inputs": prompt,
        "parameters": {
            "height": 768,               # 768px trades a little detail for speed
            "width": 768,
            "num_inference_steps": 20,   # reduced from the usual 30 for speed
            "guidance_scale": 7.0,       # slightly lower guidance, faster
        },
        # FIX: "options" is a top-level key of the Inference API payload, not a
        # generation parameter — the previous code nested it under "parameters",
        # so wait_for_model=False was silently ignored.
        "options": {"wait_for_model": False},
    }

    for attempt in range(MAX_RETRIES):
        try:
            start_time = time.time()
            # Call synchronously: the old EXECUTOR.submit(...).result() blocked
            # on the future immediately, adding a thread hop with no benefit —
            # Gradio's queue already provides request concurrency.
            response = requests.post(
                API_URL, headers=headers, json=payload, timeout=TIMEOUT
            )
            if response.status_code == 200:
                gen_time = time.time() - start_time
                return add_watermark(response.content), f"✔️ Generated in {gen_time:.1f}s"
            if response.status_code == 503:
                # Model is cold-loading: progressive backoff, then retry.
                if attempt < MAX_RETRIES - 1:
                    time.sleep(5 * (attempt + 1))
                continue
            return None, f"⚠️ API Error: {response.text[:200]}"
        except requests.Timeout:
            return None, f"⚠️ Timeout: Model took >{TIMEOUT}s"
        except Exception as e:
            return None, f"⚠️ Error: {str(e)[:200]}"
    return None, "⚠️ Failed after retries. Try again."
# ===== GRADIO INTERFACE =====
# ===== UI LAYOUT =====
# NOTE(review): indentation in this declarative block was reconstructed — the
# pasted source had lost all leading whitespace; nesting follows the usual
# prompt-column / output-column arrangement. Confirm against the live Space.
with gr.Blocks(title="SelamGPT Image Generator") as demo:
    gr.Markdown("""
    # 🎨 SelamGPT Image Generator
    *Optimized for speed (WebP output @ 768px)*
    """)
    with gr.Row():
        # Left column: prompt entry, action buttons, and canned examples.
        with gr.Column(scale=3):
            prompt_input = gr.Textbox(
                label="Describe your image",
                placeholder="A futuristic Ethiopian city with flying cars...",
                lines=3
            )
            with gr.Row():
                generate_btn = gr.Button("Generate Image", variant="primary")
                clear_btn = gr.Button("Clear")
            gr.Examples(
                examples=[
                    ["An ancient Aksumite warrior in cyberpunk armor"],
                    ["Traditional Ethiopian coffee ceremony"],
                    ["Habesha queen with jewelry"]
                ],
                inputs=prompt_input
            )
        # Right column: generated image plus a status line.
        with gr.Column(scale=2):
            output_image = gr.Image(
                label="Generated Image (WebP)",
                type="pil",
                format="webp", # WebP output
                height=512
            )
            status_output = gr.Textbox(
                label="Status",
                interactive=False
            )
    # Wire the generate button to the API call; queue=True serializes requests
    # through Gradio's queue so concurrent users don't overload the backend.
    generate_btn.click(
        fn=generate_image,
        inputs=prompt_input,
        outputs=[output_image, status_output],
        queue=True
    )
    # Clear resets both the image and the status text.
    clear_btn.click(
        fn=lambda: [None, ""],
        outputs=[output_image, status_output]
    )
if __name__ == "__main__":
    # gr.Image(format="webp") above requires Gradio 4.x, and in 4.x the
    # queue() argument `concurrency_count` was removed (it raised TypeError);
    # `default_concurrency_limit` is the 4.x equivalent — up to 3 events
    # from the queue run concurrently.
    demo.queue(default_concurrency_limit=3)
    demo.launch(server_name="0.0.0.0", server_port=7860)