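"""SelamGPT Image Generator.

Gradio app that sends a text prompt to the Hugging Face Inference API
(stabilityai/stable-diffusion-xl-base-1.0), watermarks the returned image,
and serves it as WebP.
"""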
import os
import requests
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import io
import time
from concurrent.futures import ThreadPoolExecutor

# ===== CONFIGURATION =====
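# HF_API_TOKEN is read from the environment (e.g. a Space secret); if it is
# unset, the Authorization header below is invalid and requests will be rejected.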
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
WATERMARK_TEXT = "SelamGPT"
MAX_RETRIES = 3
TIMEOUT = 45  # Reduced timeout for faster failover
EXECUTOR = ThreadPoolExecutor(max_workers=3)  # Increased workers

# ===== OPTIMIZED WATERMARK FUNCTION (WebP) =====
def add_watermark(image_bytes):
    """Add watermark with optimized WebP output (85% quality)"""
    try:
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        draw = ImageDraw.Draw(image)
        
        font_size = 24
        try:
            font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
        except OSError:  # font file not bundled alongside the app
            try:
                font = ImageFont.load_default(font_size)  # Pillow >= 10.1 accepts a size
            except TypeError:
                font = ImageFont.load_default()  # older Pillow: fixed-size bitmap font
        
        text_width = draw.textlength(WATERMARK_TEXT, font=font)
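        # Place the watermark in the bottom-right corner with a small margin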
        x = image.width - text_width - 10
        y = image.height - 34
        
        # Dark offset shadow behind white text keeps the watermark readable
        draw.text((x + 1, y + 1), WATERMARK_TEXT, font=font, fill=(0, 0, 0))
        draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))
        
        # Re-encode as WebP for smaller output (method=6 = smallest file, slowest encode)
        webp_buffer = io.BytesIO()
        image.save(webp_buffer, format="WEBP", quality=85, method=6)
        webp_buffer.seek(0)
        return Image.open(webp_buffer)
    except Exception as e:
        print(f"Watermark error: {str(e)}")
        return Image.open(io.BytesIO(image_bytes))

# ===== FASTER IMAGE GENERATION =====
def generate_image(prompt):
    if not prompt.strip():
        return None, "⚠️ Please enter a prompt"
    
    # Generation parameters tuned for speed over maximum quality
    params = {
        "height": 768,  # Smaller than the 1024px SDXL default = faster
        "width": 768,
        "num_inference_steps": 20,  # Fewer denoising steps = faster
        "guidance_scale": 7.0,
    }
    
    def api_call():
        return requests.post(
            API_URL,
            headers=headers,
            json={
                "inputs": prompt,
                "parameters": params,
                # "options" is a top-level payload field, not a generation parameter
                "options": {"wait_for_model": False},  # fail fast with 503 instead of blocking while the model loads
            },
            timeout=TIMEOUT,
        )
    
    for attempt in range(MAX_RETRIES):
        try:
            start_time = time.time()
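            # Run the request on the shared pool (at most 3 concurrent API calls) and block for the result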
            response = EXECUTOR.submit(api_call).result()
            
            if response.status_code == 200:
                gen_time = time.time() - start_time
                return add_watermark(response.content), f"✔️ Generated in {gen_time:.1f}s"
            elif response.status_code == 503:
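                # 503 = model is still loading on the serverless backend; back off and retry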
                if attempt < MAX_RETRIES - 1:
                    time.sleep(5 * (attempt + 1))  # Progressive backoff
                continue
            else:
                return None, f"⚠️ API Error: {response.text[:200]}"
        except requests.Timeout:
            return None, f"⚠️ Timeout: Model took >{TIMEOUT}s"
        except Exception as e:
            return None, f"⚠️ Error: {str(e)[:200]}"
    
    return None, "⚠️ Failed after retries. Try again."

# ===== GRADIO INTERFACE =====
with gr.Blocks(title="SelamGPT Image Generator") as demo:
    gr.Markdown("""
    # 🎨 SelamGPT Image Generator
    *Optimized for speed (WebP output @ 768px)*
    """)
    
    with gr.Row():
        with gr.Column(scale=3):
            prompt_input = gr.Textbox(
                label="Describe your image",
                placeholder="A futuristic Ethiopian city with flying cars...",
                lines=3
            )
            with gr.Row():
                generate_btn = gr.Button("Generate Image", variant="primary")
                clear_btn = gr.Button("Clear")
            
            gr.Examples(
                examples=[
                    ["An ancient Aksumite warrior in cyberpunk armor"],
                    ["Traditional Ethiopian coffee ceremony"],
                    ["Habesha queen with jewelry"]
                ],
                inputs=prompt_input
            )
            
        with gr.Column(scale=2):
            output_image = gr.Image(
                label="Generated Image (WebP)",
                type="pil",
                format="webp",  # WebP output
                height=512
            )
            status_output = gr.Textbox(
                label="Status",
                interactive=False
            )
    
    generate_btn.click(
        fn=generate_image,
        inputs=prompt_input,
        outputs=[output_image, status_output],
        queue=True
    )
    
    clear_btn.click(
        fn=lambda: [None, ""],
        outputs=[output_image, status_output]
    )

if __name__ == "__main__":
    demo.queue(default_concurrency_limit=3)  # Gradio 4.x: concurrency_count was renamed to default_concurrency_limit
    demo.launch(server_name="0.0.0.0", server_port=7860)