File size: 6,010 Bytes
882e052
 
c8f1f54
4fe456a
882e052
 
087c578
c8f1f54
087c578
882e052
087c578
882e052
 
 
 
087c578
 
c8f1f54
087c578
882e052
087c578
882e052
 
 
 
087c578
 
 
 
 
 
 
 
 
 
 
 
 
 
882e052
087c578
882e052
087c578
 
 
 
 
 
 
 
 
 
b23fb24
882e052
 
 
 
 
087c578
882e052
 
 
 
087c578
c8f1f54
087c578
882e052
087c578
882e052
087c578
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
882e052
 
 
087c578
 
882e052
 
087c578
 
 
 
 
882e052
 
087c578
882e052
087c578
882e052
087c578
882e052
087c578
 
 
 
 
 
 
 
4fe456a
087c578
882e052
 
087c578
882e052
 
 
087c578
882e052
 
087c578
 
 
 
882e052
087c578
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
882e052
 
087c578
882e052
 
 
087c578
 
 
 
 
 
 
 
882e052
c8f1f54
087c578
c8f1f54
087c578
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
import os
import requests
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import io
import time
from concurrent.futures import ThreadPoolExecutor

# ===== CONFIGURATION =====
# Hugging Face API token read from the environment; if unset this is None
# and requests are sent with "Bearer None" (unauthenticated — will fail).
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
# Serverless HF Inference API endpoint for MODEL_NAME.
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
WATERMARK_TEXT = "SelamGPT"  # overlay text drawn by add_watermark()
MAX_RETRIES = 3  # generate_image() attempts before giving up
TIMEOUT = 45  # Increased timeout for larger images
EXECUTOR = ThreadPoolExecutor(max_workers=2)  # Handle concurrent requests

# ===== WATERMARK FUNCTION =====
def add_watermark(image_bytes):
    """Overlay the SelamGPT watermark on a generated image.

    Args:
        image_bytes: Raw encoded image bytes (PNG/JPEG as returned by
            the inference API).

    Returns:
        PIL.Image.Image: RGB image with a drop-shadowed watermark in the
        bottom-right corner. On any failure the original image is decoded
        and returned (also as RGB) so the caller always gets an image.
    """
    try:
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        draw = ImageDraw.Draw(image)

        # Font size scales with image width, capped at 40px.
        size = min(image.width // 15, 40)

        # Try bundled/system fonts in order; truetype raises OSError when
        # the font file is missing (narrow catch — was a bare `except:`).
        font = None
        for font_path in (
            "Roboto-Bold.ttf",      # our Dockerfile installs this
            "DejaVuSans-Bold.ttf",
            "FreeSansBold.ttf",
        ):
            try:
                font = ImageFont.truetype(font_path, size)
                break
            except OSError:
                continue
        if font is None:
            try:
                # Pillow >= 10.1 accepts a size for the default font.
                font = ImageFont.load_default(size)
            except TypeError:
                # Older Pillow: load_default() takes no arguments.
                font = ImageFont.load_default()

        # Anchor the text in the bottom-right corner with a small margin.
        bbox = draw.textbbox((0, 0), WATERMARK_TEXT, font=font)
        text_w, text_h = bbox[2] - bbox[0], bbox[3] - bbox[1]
        margin = image.width // 50
        position = (image.width - text_w - margin, image.height - text_h - margin)

        # Two offset dark copies give the text a shadow/outline effect.
        for dx, dy in ((-1, -1), (1, 1)):
            draw.text(
                (position[0] + dx, position[1] + dy),
                WATERMARK_TEXT,
                font=font,
                fill=(0, 0, 0, 180))  # Semi-transparent black

        draw.text(
            position,
            WATERMARK_TEXT,
            font=font,
            fill=(255, 255, 255, 200))  # Semi-transparent white

        return image
    except Exception as e:
        print(f"Watermark error: {str(e)}")
        # Best-effort fallback: return the undecorated image, normalized
        # to RGB for consistency with the success path (the original
        # returned it unconverted).
        return Image.open(io.BytesIO(image_bytes)).convert("RGB")

# ===== IMAGE GENERATION =====
def generate_image(prompt):
    """Generate a watermarked image from a text prompt via the HF API.

    Args:
        prompt: Free-text image description. None, empty, or
            whitespace-only prompts are rejected up front (the original
            crashed with AttributeError on None).

    Returns:
        tuple: (PIL.Image.Image | None, str) — the watermarked image (or
        None on failure) and a status message prefixed with an emoji so
        the UI can display it directly.
    """
    if not prompt or not prompt.strip():
        return None, "⚠️ Please enter a prompt"

    def api_call():
        # Blocking POST; executed on the shared EXECUTOR so at most
        # max_workers requests hit the API concurrently.
        return requests.post(
            API_URL,
            headers=headers,
            json={
                "inputs": prompt,
                "parameters": {
                    "height": 768,
                    "width": 768,
                    "num_inference_steps": 25
                },
                "options": {"wait_for_model": True}
            },
            timeout=TIMEOUT
        )

    for attempt in range(MAX_RETRIES):
        try:
            # future.result() re-raises any exception from api_call().
            response = EXECUTOR.submit(api_call).result()

            if response.status_code == 200:
                return add_watermark(response.content), "✔️ Generation successful"
            elif response.status_code == 503:
                # Model is cold-loading; back off linearly and retry.
                wait_time = (attempt + 1) * 10
                print(f"Model loading, waiting {wait_time}s...")
                time.sleep(wait_time)
                continue
            else:
                return None, f"⚠️ API Error: {response.text[:200]}"
        except requests.Timeout:
            # Retry timeouts within the same budget instead of giving up
            # on the first one (the original returned immediately).
            if attempt == MAX_RETRIES - 1:
                return None, "⚠️ Timeout: Model took too long to respond"
            continue
        except Exception as e:
            return None, f"⚠️ Unexpected error: {str(e)[:200]}"

    return None, "⚠️ Failed after multiple attempts. Please try again later."

# ===== GRADIO INTERFACE =====
# Shared theme for the whole app: emerald/amber palette, Poppins first.
ui_theme = gr.themes.Default(
    primary_hue="emerald",
    secondary_hue="amber",
    font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"],
)

with gr.Blocks(theme=ui_theme, title="SelamGPT Image Generator") as demo:
    # Page header.
    gr.Markdown("""
    # 🎨 SelamGPT Image Generator
    *Generate watermarked images with Stable Diffusion XL*
    """)

    with gr.Row():
        # Left column: prompt entry, action buttons, example prompts.
        with gr.Column(scale=3):
            prompt_box = gr.Textbox(
                label="Describe your image",
                placeholder="A futuristic Ethiopian city with flying cars...",
                lines=3,
                max_lines=5,
                elem_id="prompt-box",
            )
            with gr.Row():
                btn_generate = gr.Button("Generate Image", variant="primary")
                btn_clear = gr.Button("Clear")

            gr.Examples(
                examples=[
                    ["An ancient Aksumite warrior in cyberpunk armor"],
                    ["Traditional Ethiopian coffee ceremony in space"],
                    ["Hyper-realistic portrait of a Habesha woman with neon tribal markings"],
                ],
                inputs=prompt_box,
                label="Example Prompts",
            )

        # Right column: rendered image plus a status line.
        with gr.Column(scale=2):
            image_out = gr.Image(
                label="Generated Image",
                height=512,
                elem_id="output-image",
            )
            status_box = gr.Textbox(
                label="Status",
                interactive=False,
                elem_id="status-box",
            )

    # Event wiring: generation goes through the queue; clear resets both
    # outputs.
    btn_generate.click(
        fn=generate_image,
        inputs=prompt_box,
        outputs=[image_out, status_box],
        queue=True,
        show_progress="minimal",
    )

    btn_clear.click(
        fn=lambda: [None, ""],
        outputs=[image_out, status_box],
    )

# ===== DEPLOYMENT CONFIG =====
if __name__ == "__main__":
    # Queue limits concurrent generations to 2 and hides the queue API.
    # NOTE(review): `concurrency_count` was removed in Gradio 4.x
    # (replaced by `default_concurrency_limit`) — confirm the pinned
    # Gradio version still accepts this keyword.
    demo.queue(concurrency_count=2, api_open=False)
    # Bind on all interfaces on port 7860 (the HF Spaces convention).
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        favicon_path="./favicon.ico"  # Optional
    )