snackshell committed on
Commit
b032243
·
verified ·
1 Parent(s): d6e7094

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -64
app.py CHANGED
@@ -1,4 +1,3 @@
1
- import os
2
  import gradio as gr
3
  from PIL import Image, ImageDraw, ImageFont
4
  import io
@@ -11,25 +10,21 @@ WATERMARK_TEXT = "SelamGPT"
11
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
12
  TORCH_DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
13
 
14
# ===== MODEL LOADING =====
# Global variable for model caching (alternative to @gr.Cache)
pipe = None


def load_model():
    """Lazily load the diffusion pipeline once per process and reuse it.

    Returns:
        The cached ``DiffusionPipeline`` moved to ``DEVICE`` with
        ``TORCH_DTYPE`` weights.
    """
    global pipe
    if pipe is None:
        pipe = DiffusionPipeline.from_pretrained(
            MODEL_NAME,
            torch_dtype=TORCH_DTYPE,
        ).to(DEVICE)

        # Optimizations
        if DEVICE == "cuda":
            try:
                pipe.enable_xformers_memory_efficient_attention()
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C / SystemExit
                # are not swallowed; xformers is an optional speed-up.
                print("Xformers not available, using default attention")
            pipe.enable_attention_slicing()

    return pipe
35
 
def add_watermark(image):
    """Stamp ``WATERMARK_TEXT`` near the bottom-right corner of *image*.

    The image is re-encoded as an optimized PNG and reopened so the caller
    receives a fresh PIL image. Watermarking is best-effort: on any failure
    the error is logged and the input image is returned unchanged.
    """
    try:
        draw = ImageDraw.Draw(image)

        font_size = 24
        try:
            font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
        except OSError:
            # Font file missing/unreadable — fall back to PIL's built-in.
            # (Narrowed from a bare `except:`.)
            font = ImageFont.load_default(font_size)

        text_width = draw.textlength(WATERMARK_TEXT, font=font)
        x = image.width - text_width - 10
        y = image.height - 34

        # Shadow first, then the main text on top, for legibility.
        draw.text((x + 1, y + 1), WATERMARK_TEXT, font=font, fill=(0, 0, 0, 128))
        draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))

        # Convert to optimized PNG. The original passed `quality=85`, but
        # Pillow's PNG encoder ignores `quality`, so it is dropped here.
        img_byte_arr = io.BytesIO()
        image.save(img_byte_arr, format='PNG', optimize=True)
        img_byte_arr.seek(0)
        return Image.open(img_byte_arr)
    except Exception as e:
        print(f"Watermark error: {str(e)}")
        # NOTE(review): the tail of this handler is cut off in the diff
        # view; returning the unwatermarked image is the presumed
        # fallback — confirm against the full file.
        return image
@@ -64,48 +61,43 @@ def add_watermark(image):
64
# ===== IMAGE GENERATION =====
def generate_image(prompt):
    """Generate a watermarked image for *prompt*.

    Returns:
        A ``(image, status_message)`` pair; ``image`` is ``None`` whenever
        generation could not run (empty prompt or any error).
    """
    # Guard clause: reject blank prompts before touching the model.
    if not prompt.strip():
        return None, "⚠️ Please enter a prompt"

    try:
        pipeline = load_model()
        result = pipeline(
            prompt,
            num_inference_steps=30,
            guidance_scale=7.5,
        )
        watermarked = add_watermark(result.images[0])
        return watermarked, "✔️ Generation successful"
    except torch.cuda.OutOfMemoryError:
        return None, "⚠️ Out of memory! Try a simpler prompt"
    except Exception as e:
        return None, f"⚠️ Error: {str(e)[:200]}"
83
 
84
- # ===== GRADIO THEME =====
85
- theme = gr.themes.Default(
86
  primary_hue="emerald",
87
- secondary_hue="amber",
88
  font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"]
89
- )
90
-
91
- # ===== GRADIO INTERFACE =====
92
- with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
93
- gr.Markdown("""
94
- # 🎨 SelamGPT Image Generator
95
- *Powered by HiDream-I1-Full (1024x1024 PNG output)*
96
- """)
97
 
98
- with gr.Row():
 
 
99
  with gr.Column(scale=3):
100
  prompt_input = gr.Textbox(
101
  label="Describe your image",
102
  placeholder="A futuristic Ethiopian city with flying cars...",
103
  lines=3,
104
- max_lines=5
 
105
  )
106
- with gr.Row():
107
- generate_btn = gr.Button("Generate Image", variant="primary")
108
- clear_btn = gr.Button("Clear")
109
 
110
  gr.Examples(
111
  examples=[
@@ -113,33 +105,41 @@ with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
113
  ["Traditional Ethiopian coffee ceremony in zero gravity"],
114
  ["Portrait of a Habesha queen with golden jewelry"]
115
  ],
116
- inputs=prompt_input
 
117
  )
118
-
119
  with gr.Column(scale=2):
120
  output_image = gr.Image(
121
  label="Generated Image",
122
  type="pil",
123
- format="png",
124
- height=512
125
  )
126
- status_output = gr.Textbox(
127
  label="Status",
128
- interactive=False
 
129
  )
130
 
 
131
  generate_btn.click(
132
  fn=generate_image,
133
  inputs=prompt_input,
134
- outputs=[output_image, status_output],
135
- queue=True
136
  )
137
 
138
- clear_btn.click(
139
- fn=lambda: [None, ""],
140
- outputs=[output_image, status_output]
 
 
141
  )
142
 
143
  if __name__ == "__main__":
144
- demo.queue(max_size=2)
145
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
import functools
import io

import gradio as gr
from PIL import Image, ImageDraw, ImageFont
 
10
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
11
  TORCH_DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
12
 
13
# ===== MODEL LOADING =====
# NOTE(review): the previous `@gr.Cache()` decorator is not a Gradio API —
# the gradio module has no `Cache` attribute in any release, so it raises
# AttributeError at import time. `functools.lru_cache(maxsize=1)` gives the
# same zero-argument cached-singleton behavior with the stdlib.
@functools.lru_cache(maxsize=1)
def load_model():
    """Load the diffusion pipeline once and cache it for the process.

    Returns:
        The ``DiffusionPipeline`` moved to ``DEVICE`` with ``TORCH_DTYPE``
        weights; subsequent calls return the cached instance.
    """
    pipe = DiffusionPipeline.from_pretrained(
        MODEL_NAME,
        torch_dtype=TORCH_DTYPE,
    ).to(DEVICE)

    # Optimizations
    if DEVICE == "cuda":
        try:
            pipe.enable_xformers_memory_efficient_attention()
        except Exception:
            # Narrowed from a bare `except:`; xformers is optional.
            print("Xformers not available, using default attention")
        pipe.enable_attention_slicing()

    return pipe
30
 
 
34
def add_watermark(image):
    """Stamp ``WATERMARK_TEXT`` (gold, with shadow) onto *image*.

    Font size and margin scale with image width. The result is re-encoded
    as optimized PNG and reopened. Best-effort: on failure the error is
    logged and the input image is returned unchanged.
    """
    try:
        draw = ImageDraw.Draw(image)

        font_size = max(24, int(image.width * 0.03))  # Dynamic font sizing
        try:
            font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
        except OSError:
            # Font file missing/unreadable — use PIL's built-in default.
            # (Narrowed from a bare `except:`.)
            font = ImageFont.load_default(font_size)

        text_width = draw.textlength(WATERMARK_TEXT, font=font)
        margin = image.width * 0.02  # Dynamic margin
        x = image.width - text_width - margin
        y = image.height - (font_size * 1.5)

        # Shadow effect
        draw.text((x + 2, y + 2), WATERMARK_TEXT, font=font, fill=(0, 0, 0, 150))
        # Main text
        draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 215, 0))  # Gold color

        # Optimized PNG output (Image.open rewinds the buffer itself)
        img_byte_arr = io.BytesIO()
        image.save(img_byte_arr, format='PNG', optimize=True)
        return Image.open(img_byte_arr)
    except Exception as e:
        print(f"Watermark error: {str(e)}")
        # NOTE(review): the tail of this handler is cut off in the diff
        # view; returning the unwatermarked image is the presumed
        # fallback — confirm against the full file.
        return image
 
61
# ===== IMAGE GENERATION =====
def generate_image(prompt):
    """Generate a watermarked 1024x1024 image for *prompt*.

    Returns:
        ``(image, status_message)`` on success.

    Raises:
        gr.Error: for a blank prompt, CUDA out-of-memory, or any other
            generation failure (message truncated to 200 chars).
    """
    # Guard clause: refuse blank prompts before loading the model.
    if not prompt.strip():
        raise gr.Error("Please enter a prompt")

    try:
        pipeline = load_model()
        output = pipeline(
            prompt,
            num_inference_steps=30,
            guidance_scale=7.5,
            width=1024,
            height=1024,
        )
        # Watermarking happens inside the try so its failures are also
        # surfaced as gr.Error, exactly as before.
        return add_watermark(output.images[0]), "🎨 Generation complete!"
    except torch.cuda.OutOfMemoryError:
        raise gr.Error("Out of memory! Try a simpler prompt or smaller image size")
    except Exception as e:
        raise gr.Error(f"Generation failed: {str(e)[:200]}")
81
 
82
+ # ===== GRADIO 5.x INTERFACE =====
83
+ with gr.Blocks(theme=gr.themes.Default(
84
  primary_hue="emerald",
85
+ secondary_hue="gold",
86
  font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"]
87
+ )) as demo:
 
 
 
 
 
 
 
88
 
89
+ gr.Markdown("""<h1 align="center">🎨 SelamGPT HiDream Generator</h1>""")
90
+
91
+ with gr.Row(variant="panel"):
92
  with gr.Column(scale=3):
93
  prompt_input = gr.Textbox(
94
  label="Describe your image",
95
  placeholder="A futuristic Ethiopian city with flying cars...",
96
  lines=3,
97
+ max_lines=5,
98
+ autofocus=True
99
  )
100
+ generate_btn = gr.Button("Generate Image", variant="primary")
 
 
101
 
102
  gr.Examples(
103
  examples=[
 
105
  ["Traditional Ethiopian coffee ceremony in zero gravity"],
106
  ["Portrait of a Habesha queen with golden jewelry"]
107
  ],
108
+ inputs=prompt_input,
109
+ label="Try these prompts:"
110
  )
111
+
112
  with gr.Column(scale=2):
113
  output_image = gr.Image(
114
  label="Generated Image",
115
  type="pil",
116
+ height=512,
117
+ interactive=False
118
  )
119
+ status = gr.Textbox(
120
  label="Status",
121
+ interactive=False,
122
+ show_label=False
123
  )
124
 
125
+ # Event handlers
126
  generate_btn.click(
127
  fn=generate_image,
128
  inputs=prompt_input,
129
+ outputs=[output_image, status],
130
+ api_name="generate"
131
  )
132
 
133
+ # Keyboard shortcut
134
+ prompt_input.submit(
135
+ fn=generate_image,
136
+ inputs=prompt_input,
137
+ outputs=[output_image, status]
138
  )
139
 
140
  if __name__ == "__main__":
141
+ demo.launch(
142
+ server_name="0.0.0.0",
143
+ server_port=7860,
144
+ share=False
145
+ )