snackshell committed on
Commit
dc45b2e
·
verified ·
1 Parent(s): e4b7ce6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -102
app.py CHANGED
@@ -1,125 +1,98 @@
1
  import os
2
- import requests
3
  import gradio as gr
 
4
  from PIL import Image, ImageDraw, ImageFont
5
- import io
6
- import time
7
- from concurrent.futures import ThreadPoolExecutor
8
 
9
- # ===== CONFIGURATION =====
10
- HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
11
- MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
12
- API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
13
- headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
14
  WATERMARK_TEXT = "SelamGPT"
15
- MAX_RETRIES = 3
16
- TIMEOUT = 45
17
- EXECUTOR = ThreadPoolExecutor(max_workers=3)
18
 
19
- # ===== OPTIMIZED WATERMARK FUNCTION =====
20
- def add_watermark(image_bytes):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  try:
22
- image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
23
  draw = ImageDraw.Draw(image)
24
-
25
- font_size = 24
26
- try:
27
- font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
28
- except:
29
- font = ImageFont.load_default(font_size)
30
-
31
  text_width = draw.textlength(WATERMARK_TEXT, font=font)
32
- x = image.width - text_width - 10
33
- y = image.height - 34
34
-
35
- draw.text((x+1, y+1), WATERMARK_TEXT, font=font, fill=(0, 0, 0, 128))
36
- draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))
37
-
38
- webp_buffer = io.BytesIO()
39
- image.save(webp_buffer, format="WEBP", quality=85)
40
- webp_buffer.seek(0)
41
- return Image.open(webp_buffer)
42
- except Exception as e:
43
- print(f"Watermark error: {str(e)}")
44
- return Image.open(io.BytesIO(image_bytes))
45
 
46
- # ===== IMAGE GENERATION =====
47
  def generate_image(prompt):
48
  if not prompt.strip():
49
  return None, "⚠️ Please enter a prompt"
50
 
51
- params = {
52
- "height": 768,
53
- "width": 768,
54
- "num_inference_steps": 20,
55
- "guidance_scale": 7.0,
56
- "options": {"wait_for_model": False}
57
- }
58
-
59
- def api_call():
60
- return requests.post(
61
- API_URL,
62
- headers=headers,
63
- json={
64
- "inputs": prompt,
65
- "parameters": params
66
- },
67
- timeout=TIMEOUT
68
  )
69
-
70
- for attempt in range(MAX_RETRIES):
71
- try:
72
- start_time = time.time()
73
- response = EXECUTOR.submit(api_call).result()
74
-
75
- if response.status_code == 200:
76
- gen_time = time.time() - start_time
77
- return add_watermark(response.content), f"✔️ Generated in {gen_time:.1f}s"
78
- elif response.status_code == 503:
79
- time.sleep(5 * (attempt + 1))
80
- continue
81
- else:
82
- return None, f"⚠️ API Error: {response.text[:200]}"
83
- except requests.Timeout:
84
- return None, f"⚠️ Timeout: Model took >{TIMEOUT}s"
85
- except Exception as e:
86
- return None, f"⚠️ Error: {str(e)[:200]}"
87
-
88
- return None, "⚠️ Failed after retries. Try again."
89
 
90
- # ===== GRADIO INTERFACE =====
91
- with gr.Blocks(title="SelamGPT Image Generator") as demo:
92
  gr.Markdown("""
93
- # 🎨 SelamGPT Image Generator
94
- *Optimized for speed (WebP output @ 768px)*
95
  """)
96
 
97
  with gr.Row():
98
- with gr.Column(scale=3):
99
  prompt_input = gr.Textbox(
100
  label="Describe your image",
101
- placeholder="A futuristic Ethiopian city...",
102
  lines=3
103
  )
104
- with gr.Row():
105
- generate_btn = gr.Button("Generate Image", variant="primary")
106
- clear_btn = gr.Button("Clear")
107
 
108
  gr.Examples(
109
  examples=[
110
- ["An ancient Aksumite warrior in cyberpunk armor"],
111
- ["Traditional Ethiopian coffee ceremony"],
112
- ["Habesha queen with jewelry"]
113
  ],
114
  inputs=prompt_input
115
  )
116
 
117
- with gr.Column(scale=2):
118
  output_image = gr.Image(
119
- label="Generated Image (WebP)",
120
  type="pil",
121
- format="webp",
122
- height=512
123
  )
124
  status_output = gr.Textbox(
125
  label="Status",
@@ -129,20 +102,12 @@ with gr.Blocks(title="SelamGPT Image Generator") as demo:
129
  generate_btn.click(
130
  fn=generate_image,
131
  inputs=prompt_input,
132
- outputs=[output_image, status_output],
133
- queue=True
134
- )
135
-
136
- clear_btn.click(
137
- fn=lambda: [None, ""],
138
  outputs=[output_image, status_output]
139
  )
140
 
141
  if __name__ == "__main__":
142
- # Version-compatible queue setup
143
- try:
144
- demo.queue(concurrency_count=3) # New Gradio
145
- except TypeError:
146
- demo.queue(max_size=3) # Old Gradio
147
-
148
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import os
2
+ import torch
3
  import gradio as gr
4
+ from diffusers import DiffusionPipeline
5
  from PIL import Image, ImageDraw, ImageFont
 
 
 
6
 
7
+ # ===== FREE-TIER CONFIG =====
 
 
 
 
8
  WATERMARK_TEXT = "SelamGPT"
9
+ MODEL_NAME = "DeepFloyd/IF-II-L-v1.0"
10
+ CACHE_DIR = "model_cache" # For free tier storage limits
 
11
 
12
+ # ===== LIGHTWEIGHT MODEL LOAD =====
13
+ pipe = None # Lazy load to avoid cold start timeouts
14
+
15
+ def load_model():
16
+ global pipe
17
+ if pipe is None:
18
+ pipe = DiffusionPipeline.from_pretrained(
19
+ MODEL_NAME,
20
+ torch_dtype=torch.float16, # 50% VRAM reduction
21
+ variant="fp16",
22
+ cache_dir=CACHE_DIR
23
+ )
24
+ pipe.enable_model_cpu_offload() # Critical for free-tier VRAM
25
+
26
# ===== OPTIMIZED WATERMARK =====
def add_watermark(image):
    """Stamp WATERMARK_TEXT in the bottom-right corner of *image*.

    Draws directly onto the PIL image passed in (mutates it) and returns it.
    Watermarking is best-effort: on any failure the image is returned
    unmodified so a generation result is never lost to a cosmetic step.
    """
    try:
        draw = ImageDraw.Draw(image)
        # Pillow >= 10.1 accepts a size for the bundled default font; older
        # versions raise TypeError, which previously fell into the broad
        # handler below and silently skipped the watermark entirely.
        try:
            font = ImageFont.load_default(20)  # No external font needed
        except TypeError:
            font = ImageFont.load_default()  # Pre-10.1 fallback (fixed size)
        text_width = draw.textlength(WATERMARK_TEXT, font=font)
        draw.text(
            (image.width - text_width - 15, image.height - 30),
            WATERMARK_TEXT,
            font=font,
            fill=(255, 255, 255)
        )
        return image
    except Exception:
        # Best-effort: never fail the pipeline over a watermark.
        return image
 
 
 
 
41
 
42
# ===== FREE-TIER GENERATION =====
def generate_image(prompt):
    """Generate one image for *prompt*.

    Returns a ``(PIL image, status message)`` pair on success, or
    ``(None, warning)`` for a blank prompt or any pipeline failure.
    """
    # Guard clause: reject empty/whitespace prompts before touching the model.
    if not prompt.strip():
        return None, "⚠️ Please enter a prompt"

    try:
        load_model()  # Lazy load only when needed

        # Fixed seed keeps output reproducible run-to-run.
        seeded_generator = torch.Generator().manual_seed(42)

        output = pipe(
            prompt=prompt,
            output_type="pil",
            generator=seeded_generator,
            num_inference_steps=30,  # Reduced from default 50
            guidance_scale=7.0,      # Balanced creativity/quality
        )
        first_image = output.images[0]
        return add_watermark(first_image), "✔️ Generated (Free Tier)"
    except torch.cuda.OutOfMemoryError:
        return None, "⚠️ Out of VRAM - Try simpler prompt"
    except Exception as e:
        return None, f"⚠️ Error: {str(e)[:100]}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
+ # ===== GRADIO UI =====
66
+ with gr.Blocks(title="SelamGPT Pro") as demo:
67
  gr.Markdown("""
68
+ # 🎨 SelamGPT (DeepFloyd IF-II-L)
69
+ *Optimized for Free Tier - 64px Base Resolution*
70
  """)
71
 
72
  with gr.Row():
73
+ with gr.Column():
74
  prompt_input = gr.Textbox(
75
  label="Describe your image",
76
+ placeholder="A traditional Ethiopian market...",
77
  lines=3
78
  )
79
+ generate_btn = gr.Button("Generate", variant="primary")
 
 
80
 
81
  gr.Examples(
82
  examples=[
83
+ ["Habesha cultural dress with intricate patterns, studio lighting"],
84
+ ["Lalibela rock-hewn churches at golden hour"],
85
+ ["Addis Ababa futuristic skyline, cyberpunk style"]
86
  ],
87
  inputs=prompt_input
88
  )
89
 
90
+ with gr.Column():
91
  output_image = gr.Image(
92
+ label="Generated Image",
93
  type="pil",
94
+ format="webp", # Lightweight format
95
+ height=400
96
  )
97
  status_output = gr.Textbox(
98
  label="Status",
 
102
  generate_btn.click(
103
  fn=generate_image,
104
  inputs=prompt_input,
 
 
 
 
 
 
105
  outputs=[output_image, status_output]
106
  )
107
 
108
if __name__ == "__main__":
    # `enable_queue` was deprecated in Gradio 3.x and removed in 4.x, where
    # passing it makes launch() raise TypeError and the app never starts.
    # Keep the queue-disabled intent on old Gradio, but fall back to a plain
    # launch on newer versions (same version-tolerant pattern the previous
    # revision of this file used for demo.queue()).
    try:
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            enable_queue=False  # Critical for free tier
        )
    except TypeError:
        demo.launch(server_name="0.0.0.0", server_port=7860)