snackshell committed (verified)
Commit 9aeab3c · 1 Parent(s): 460870c

Update app.py

Files changed (1)
  1. app.py +29 -35
app.py CHANGED
@@ -8,47 +8,45 @@ from concurrent.futures import ThreadPoolExecutor
 
 # ===== CONFIGURATION =====
 HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
-MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
+MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"  # Using SDXL
 API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
 headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
 WATERMARK_TEXT = "SelamGPT"
 MAX_RETRIES = 3
-TIMEOUT = 45  # Increased timeout for larger images
-EXECUTOR = ThreadPoolExecutor(max_workers=2)  # Handle concurrent requests
+TIMEOUT = 60  # Increased for SDXL's longer processing
+EXECUTOR = ThreadPoolExecutor(max_workers=2)
 
 # ===== WATERMARK FUNCTION =====
 def add_watermark(image_bytes):
-    """Add watermark with smaller text and simplified positioning"""
+    """Add clean watermark with small text in bottom-right"""
     try:
         image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
         draw = ImageDraw.Draw(image)
 
-        # Smaller font size (24 instead of 40)
+        # Font setup (smaller size)
         font_size = 24
         try:
             font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
         except:
             font = ImageFont.load_default(font_size)
 
-        text = "SelamGPT"
-        margin = 10  # Reduced from 20
+        # Positioning (10px margin from edges)
+        text_width = draw.textlength(WATERMARK_TEXT, font=font)
+        x = image.width - text_width - 10
+        y = image.height - 34  # Slightly above bottom edge
 
-        # Calculate position using textlength
-        text_width = draw.textlength(text, font=font)
-        x = image.width - text_width - margin
-        y = image.height - 30  # Fixed vertical position
-
-        # Simpler white text without transparency
-        draw.text((x, y), text, font=font, fill=(255, 255, 255))
+        # Draw white text with slight shadow for readability
+        draw.text((x+1, y+1), WATERMARK_TEXT, font=font, fill=(0, 0, 0, 128))  # Shadow
+        draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))  # Main text
 
         return image
     except Exception as e:
         print(f"Watermark error: {str(e)}")
         return Image.open(io.BytesIO(image_bytes))
 
-# ===== IMAGE GENERATION =====
+# ===== IMAGE GENERATION (SDXL-OPTIMIZED) =====
 def generate_image(prompt):
-    """Generate image with robust error handling"""
+    """Generate image with SDXL-specific parameters"""
     if not prompt.strip():
         return None, "⚠️ Please enter a prompt"
 
@@ -59,9 +57,10 @@ def generate_image(prompt):
                 json={
                     "inputs": prompt,
                     "parameters": {
-                        "height": 768,
-                        "width": 768,
-                        "num_inference_steps": 25
+                        "height": 1024,  # SDXL's native resolution
+                        "width": 1024,
+                        "num_inference_steps": 30,  # Better quality than 25
+                        "guidance_scale": 7.5  # SDXL's optimal value
                     },
                     "options": {"wait_for_model": True}
                 },
@@ -76,18 +75,18 @@ def generate_image(prompt):
             if response.status_code == 200:
                 return add_watermark(response.content), "✔️ Generation successful"
             elif response.status_code == 503:
-                wait_time = (attempt + 1) * 10
+                wait_time = (attempt + 1) * 15  # Longer wait for SDXL
                 print(f"Model loading, waiting {wait_time}s...")
                 time.sleep(wait_time)
                 continue
             else:
                 return None, f"⚠️ API Error: {response.text[:200]}"
         except requests.Timeout:
-            return None, "⚠️ Timeout: Model took too long to respond"
+            return None, f"⚠️ Timeout: Model took >{TIMEOUT}s to respond"
         except Exception as e:
             return None, f"⚠️ Unexpected error: {str(e)[:200]}"
 
-    return None, "⚠️ Failed after multiple attempts. Please try again later."
+    return None, "⚠️ Failed after multiple attempts. Please try later."
 
 # ===== GRADIO INTERFACE =====
 theme = gr.themes.Default(
@@ -99,7 +98,7 @@ theme = gr.themes.Default(
 with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
     gr.Markdown("""
     # 🎨 SelamGPT Image Generator
-    *Generate watermarked images with Stable Diffusion XL*
+    *Now powered by Stable Diffusion XL (1024x1024 resolution)*
     """)
 
     with gr.Row():
@@ -117,17 +116,17 @@ with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
 
             gr.Examples(
                 examples=[
-                    ["An ancient Aksumite warrior in cyberpunk armor"],
-                    ["Traditional Ethiopian coffee ceremony in space"],
-                    ["Hyper-realistic portrait of a Habesha woman with neon tribal markings"]
+                    ["An ancient Aksumite warrior in cyberpunk armor, 4k detailed"],
+                    ["Traditional Ethiopian coffee ceremony in zero gravity, photorealistic"],
+                    ["Portrait of a Habesha queen with golden jewelry, studio lighting"]
                 ],
                 inputs=prompt_input,
-                label="Example Prompts"
+                label="Try these SDXL-optimized prompts:"
             )
 
         with gr.Column(scale=2):
             output_image = gr.Image(
-                label="Generated Image",
+                label="Generated Image (1024x1024)",
                 height=512,
                 elem_id="output-image"
             )
@@ -137,7 +136,6 @@ with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
                 elem_id="status-box"
            )
 
-    # Event handlers
     generate_btn.click(
         fn=generate_image,
         inputs=prompt_input,
@@ -151,10 +149,6 @@ with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
         outputs=[output_image, status_output]
    )
 
-# ===== DEPLOYMENT CONFIG =====
 if __name__ == "__main__":
-    demo.queue(max_size=2)  # Correct parameter name
-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=7860
-    )
+    demo.queue(max_size=2)
+    demo.launch(server_name="0.0.0.0", server_port=7860)
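
For quick verification, here is a minimal standalone sketch of the SDXL Inference API request this commit configures. It assumes the same HF_API_TOKEN environment variable and public api-inference endpoint used in app.py; the prompt text and the output filename sdxl_test.png are illustrative only, not part of the commit.

# Minimal sketch of the request generate_image() sends after this change.
# Assumes HF_API_TOKEN is set in the environment; "sdxl_test.png" is an arbitrary output path.
import os
import requests

API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
headers = {"Authorization": f"Bearer {os.environ['HF_API_TOKEN']}"}

payload = {
    "inputs": "Traditional Ethiopian coffee ceremony in zero gravity, photorealistic",
    "parameters": {
        "height": 1024,               # SDXL's native resolution
        "width": 1024,
        "num_inference_steps": 30,
        "guidance_scale": 7.5,
    },
    "options": {"wait_for_model": True},  # block while the model loads instead of returning 503
}

response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
response.raise_for_status()

with open("sdxl_test.png", "wb") as f:
    f.write(response.content)        # raw image bytes on success
print("Saved sdxl_test.png")

On success the endpoint returns raw image bytes, which app.py passes through add_watermark() before displaying them in the Gradio interface.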