import os
import io
import time
from concurrent.futures import ThreadPoolExecutor

import requests
import gradio as gr
from PIL import Image, ImageDraw, ImageFont

# ===== CONFIGURATION =====
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
WATERMARK_TEXT = "SelamGPT"
MAX_RETRIES = 3
TIMEOUT = 45  # Increased timeout for larger images
EXECUTOR = ThreadPoolExecutor(max_workers=2)  # Handle concurrent requests

# ===== WATERMARK FUNCTION =====
def add_watermark(image_bytes):
    """Add professional watermark with fallback fonts"""
    try:
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        draw = ImageDraw.Draw(image)

        # Try multiple font options
        font = None
        for font_path in [
            "Roboto-Bold.ttf",      # Our Dockerfile installs this
            "DejaVuSans-Bold.ttf",
            "FreeSansBold.ttf",
            None                    # Final fallback to Pillow's default font
        ]:
            try:
                size = min(image.width // 15, 40)  # Responsive sizing
                font = ImageFont.truetype(font_path, size) if font_path else ImageFont.load_default(size)
                break
            except Exception:  # Missing font or unsupported argument; try the next option
                continue

        # Calculate dynamic position (bottom-right corner with a margin)
        bbox = draw.textbbox((0, 0), WATERMARK_TEXT, font=font)
        text_w, text_h = bbox[2] - bbox[0], bbox[3] - bbox[1]
        margin = image.width // 50
        position = (image.width - text_w - margin, image.height - text_h - margin)

        # Draw with outline effect
        for offset in [(-1, -1), (1, 1)]:  # Shadow positions
            draw.text(
                (position[0] + offset[0], position[1] + offset[1]),
                WATERMARK_TEXT,
                font=font,
                fill=(0, 0, 0, 180)  # Semi-transparent black
            )
        draw.text(
            position,
            WATERMARK_TEXT,
            font=font,
            fill=(255, 255, 255, 200)  # Semi-transparent white
        )
        return image
    except Exception as e:
        print(f"Watermark error: {str(e)}")
        return Image.open(io.BytesIO(image_bytes))  # Fallback to original

# ===== IMAGE GENERATION =====
def generate_image(prompt):
    """Generate an image with robust error handling"""
    if not prompt.strip():
        return None, "⚠️ Please enter a prompt"

    def api_call():
        return requests.post(
            API_URL,
            headers=headers,
            json={
                "inputs": prompt,
                "parameters": {
                    "height": 768,
                    "width": 768,
                    "num_inference_steps": 25
                },
                "options": {"wait_for_model": True}
            },
            timeout=TIMEOUT
        )

    for attempt in range(MAX_RETRIES):
        try:
            future = EXECUTOR.submit(api_call)
            response = future.result()

            if response.status_code == 200:
                return add_watermark(response.content), "✔️ Generation successful"
            elif response.status_code == 503:
                # Model still loading on the inference endpoint; back off and retry
                wait_time = (attempt + 1) * 10
                print(f"Model loading, waiting {wait_time}s...")
                time.sleep(wait_time)
                continue
            else:
                return None, f"⚠️ API Error: {response.text[:200]}"
        except requests.Timeout:
            return None, "⚠️ Timeout: Model took too long to respond"
        except Exception as e:
            return None, f"⚠️ Unexpected error: {str(e)[:200]}"

    return None, "⚠️ Failed after multiple attempts. Please try again later."
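# ----- Local smoke test (sketch, not wired into the app) -----
# generate_image() returns a (PIL image or None, status string) pair, so the
# pipeline can be exercised without the Gradio UI, assuming HF_API_TOKEN is set.
# The prompt and output filename below are only illustrative examples:
#
#   image, status = generate_image("Traditional Ethiopian coffee ceremony in space")
#   print(status)
#   if image is not None:
#       image.save("smoke_test.png")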
# ===== GRADIO INTERFACE =====
theme = gr.themes.Default(
    primary_hue="emerald",
    secondary_hue="amber",
    font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"]
)

with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
    gr.Markdown("""
    # 🎨 SelamGPT Image Generator
    *Generate watermarked images with Stable Diffusion XL*
    """)

    with gr.Row():
        with gr.Column(scale=3):
            prompt_input = gr.Textbox(
                label="Describe your image",
                placeholder="A futuristic Ethiopian city with flying cars...",
                lines=3,
                max_lines=5,
                elem_id="prompt-box"
            )
            with gr.Row():
                generate_btn = gr.Button("Generate Image", variant="primary")
                clear_btn = gr.Button("Clear")
            gr.Examples(
                examples=[
                    ["An ancient Aksumite warrior in cyberpunk armor"],
                    ["Traditional Ethiopian coffee ceremony in space"],
                    ["Hyper-realistic portrait of a Habesha woman with neon tribal markings"]
                ],
                inputs=prompt_input,
                label="Example Prompts"
            )
        with gr.Column(scale=2):
            output_image = gr.Image(
                label="Generated Image",
                height=512,
                elem_id="output-image"
            )
            status_output = gr.Textbox(
                label="Status",
                interactive=False,
                elem_id="status-box"
            )

    # Event handlers
    generate_btn.click(
        fn=generate_image,
        inputs=prompt_input,
        outputs=[output_image, status_output],
        queue=True,
        show_progress="minimal"
    )
    clear_btn.click(
        fn=lambda: [None, ""],
        outputs=[output_image, status_output]
    )

# ===== DEPLOYMENT CONFIG =====
if __name__ == "__main__":
    demo.queue(concurrency_count=2, api_open=False)
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        favicon_path="./favicon.ico"  # Optional
    )
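# ----- Running locally (sketch) -----
# The app reads HF_API_TOKEN from the environment and serves on port 7860, as
# configured in launch() above. A typical invocation might look like the
# following (the filename "app.py" is an assumption):
#
#   HF_API_TOKEN=hf_xxx python app.py
#
# Note: concurrency_count and api_open are Gradio 3.x queue() parameters;
# newer Gradio releases replaced concurrency_count with default_concurrency_limit.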