snackshell committed · Commit a346948 · verified · 1 Parent(s): 380a549

Update app.py

Files changed (1)
  1. app.py +21 -78
app.py CHANGED
@@ -12,22 +12,27 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 TORCH_DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
 
 # ===== MODEL LOADING =====
-@gr.Cache() # Cache model between generations
+# Global variable for model caching (alternative to @gr.Cache)
+pipe = None
+
 def load_model():
-    pipe = DiffusionPipeline.from_pretrained(
-        MODEL_NAME,
-        torch_dtype=TORCH_DTYPE
-    ).to(DEVICE)
-
-    # Optimizations
-    if DEVICE == "cuda":
-        pipe.enable_xformers_memory_efficient_attention()
-        pipe.enable_attention_slicing()
+    global pipe
+    if pipe is None:
+        pipe = DiffusionPipeline.from_pretrained(
+            MODEL_NAME,
+            torch_dtype=TORCH_DTYPE
+        ).to(DEVICE)
+
+        # Optimizations
+        if DEVICE == "cuda":
+            try:
+                pipe.enable_xformers_memory_efficient_attention()
+            except:
+                print("Xformers not available, using default attention")
+            pipe.enable_attention_slicing()
 
     return pipe
 
-pipe = load_model()
-
 # ===== WATERMARK FUNCTION =====
 def add_watermark(image):
     """Add watermark with optimized PNG output"""
@@ -62,81 +67,19 @@ def generate_image(prompt):
         return None, "⚠️ Please enter a prompt"
 
     try:
-        # Generate image (1024x1024 by default)
-        image = pipe(
+        model = load_model()
+        image = model(
             prompt,
            num_inference_steps=30,
            guidance_scale=7.5
         ).images[0]
 
-        # Add watermark
-        watermarked = add_watermark(image)
-        return watermarked, "✔️ Generation successful"
+        return add_watermark(image), "✔️ Generation successful"
 
     except torch.cuda.OutOfMemoryError:
         return None, "⚠️ Out of memory! Try a simpler prompt"
     except Exception as e:
         return None, f"⚠️ Error: {str(e)[:200]}"
 
-# ===== GRADIO THEME =====
-theme = gr.themes.Default(
-    primary_hue="emerald",
-    secondary_hue="amber",
-    font=[gr.themes.GoogleFont("Poppins"), "Arial", "sans-serif"]
-)
-
 # ===== GRADIO INTERFACE =====
-with gr.Blocks(theme=theme, title="SelamGPT Image Generator") as demo:
-    gr.Markdown("""
-    # 🎨 SelamGPT Image Generator
-    *Powered by HiDream-I1-Full (1024x1024 PNG output)*
-    """)
-
-    with gr.Row():
-        with gr.Column(scale=3):
-            prompt_input = gr.Textbox(
-                label="Describe your image",
-                placeholder="A futuristic Ethiopian city with flying cars...",
-                lines=3,
-                max_lines=5
-            )
-            with gr.Row():
-                generate_btn = gr.Button("Generate Image", variant="primary")
-                clear_btn = gr.Button("Clear")
-
-            gr.Examples(
-                examples=[
-                    ["An ancient Aksumite warrior in cyberpunk armor, 4k detailed"],
-                    ["Traditional Ethiopian coffee ceremony in zero gravity"],
-                    ["Portrait of a Habesha queen with golden jewelry"]
-                ],
-                inputs=prompt_input
-            )
-
-        with gr.Column(scale=2):
-            output_image = gr.Image(
-                label="Generated Image",
-                type="pil",
-                format="png",
-                height=512
-            )
-            status_output = gr.Textbox(
-                label="Status",
-                interactive=False
-            )
-
-    generate_btn.click(
-        fn=generate_image,
-        inputs=prompt_input,
-        outputs=[output_image, status_output],
-        queue=True
-    )
-
-    clear_btn.click(
-        fn=lambda: [None, ""],
-        outputs=[output_image, status_output]
-    )
-
-if __name__ == "__main__":
-    demo.queue(max_size=2)
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+# ... (keep your existing interface code exactly as is) ...
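
The substantive change above replaces the @gr.Cache() decorator (not part of Gradio's documented API, which is presumably why it was removed) with a module-level singleton that is built on first use and reused afterwards. A minimal, self-contained sketch of that lazy-loading pattern; make_model() is a hypothetical stand-in for the expensive DiffusionPipeline.from_pretrained(...).to(DEVICE) call:

    _model = None

    def make_model():
        # Stand-in for a slow model load; in app.py this is the diffusers call.
        print("loading model... (runs only once)")
        return object()

    def get_model():
        global _model
        if _model is None:
            _model = make_model()
        return _model

    assert get_model() is get_model()  # the second call reuses the cached instance

Deferring the load into generate_image() also means the Space can start serving its UI immediately instead of blocking on the model download at import time.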
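The diff shows only the docstring of add_watermark(). For orientation, here is a hedged sketch of a typical Pillow text watermark; the "SelamGPT" label, corner offset, and 50% opacity are illustrative assumptions, not the app's actual values:

    from PIL import Image, ImageDraw

    def add_watermark(image):
        """Stamp semi-transparent text near the bottom-right corner."""
        base = image.convert("RGBA")
        overlay = Image.new("RGBA", base.size, (0, 0, 0, 0))
        draw = ImageDraw.Draw(overlay)
        w, h = base.size
        # Assumed label and alpha; clamp the offset so tiny images still work.
        draw.text((max(w - 140, 0), max(h - 30, 0)), "SelamGPT",
                  fill=(255, 255, 255, 128))
        return Image.alpha_composite(base, overlay).convert("RGB")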
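generate_image() already catches torch.cuda.OutOfMemoryError (a dedicated exception class in recent PyTorch releases). One optional hardening step, an assumption rather than part of this commit, is to call torch.cuda.empty_cache() in that handler so cached VRAM is released before the user retries:

    import torch

    def handle_oom():
        # Return cached, unused CUDA blocks to the driver so a retry starts clean.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return None, "⚠️ Out of memory! Try a simpler prompt"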