snackshell committed (verified)
Commit e3617aa · Parent: ead7a0e

Update app.py

Files changed (1): app.py (+22 −26)

app.py CHANGED
@@ -7,27 +7,25 @@ from PIL import Image, ImageDraw, ImageFont
 # ===== FREE-TIER CONFIG =====
 WATERMARK_TEXT = "SelamGPT"
 MODEL_NAME = "DeepFloyd/IF-II-L-v1.0"
-CACHE_DIR = "model_cache"  # For free tier storage limits
 
-# ===== LIGHTWEIGHT MODEL LOAD =====
-pipe = None  # Lazy load to avoid cold start timeouts
+# Initialize pipeline (lazy load later)
+pipe = None
 
 def load_model():
     global pipe
     if pipe is None:
         pipe = DiffusionPipeline.from_pretrained(
             MODEL_NAME,
-            torch_dtype=torch.float16,  # 50% VRAM reduction
-            variant="fp16",
-            cache_dir=CACHE_DIR
+            torch_dtype=torch.float16,
+            variant="fp16"
         )
-        pipe.enable_model_cpu_offload()  # Critical for free-tier VRAM
+        pipe.to("cuda")
 
 # ===== OPTIMIZED WATERMARK =====
 def add_watermark(image):
     try:
         draw = ImageDraw.Draw(image)
-        font = ImageFont.load_default(20)  # No external font needed
+        font = ImageFont.load_default(20)
         text_width = draw.textlength(WATERMARK_TEXT, font=font)
         draw.text(
             (image.width - text_width - 15, image.height - 30),
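Note on the loading change above: `enable_model_cpu_offload()` (which needs the accelerate package) keeps submodules in system RAM and streams each one to the GPU only while it runs, trading speed for a much smaller VRAM footprint, whereas `pipe.to("cuda")` keeps the whole fp16 pipeline resident on the GPU. A minimal sketch of the lazy-load pattern with both strategies, where the `LOW_VRAM` toggle is hypothetical and added purely for illustration:

```python
import torch
from diffusers import DiffusionPipeline

MODEL_NAME = "DeepFloyd/IF-II-L-v1.0"
LOW_VRAM = False  # hypothetical toggle, not in the app

pipe = None  # lazy-loaded on first request to avoid cold-start timeouts

def load_model():
    global pipe
    if pipe is None:
        pipe = DiffusionPipeline.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.float16,  # halves weight memory vs. float32
            variant="fp16",             # download the fp16 weight files
        )
        if LOW_VRAM:
            # Old behavior: stream submodules to the GPU one at a time
            # (requires accelerate; slower, but fits in far less VRAM).
            pipe.enable_model_cpu_offload()
        else:
            # New behavior: keep the whole pipeline on the GPU.
            pipe.to("cuda")
```

One adjacent caveat: `ImageFont.load_default(20)` only accepts a size argument on Pillow 10.1 or newer; older Pillow versions take no arguments, so this line would raise a TypeError there.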
@@ -39,50 +37,49 @@ def add_watermark(image):
     except Exception:
         return image
 
-# ===== FREE-TIER GENERATION =====
+# ===== GENERATION FUNCTION =====
 def generate_image(prompt):
     if not prompt.strip():
         return None, "⚠️ Please enter a prompt"
 
     try:
-        load_model()  # Lazy load only when needed
+        load_model()
 
-        # Free-tier optimized settings
         result = pipe(
             prompt=prompt,
             output_type="pil",
-            generator=torch.Generator().manual_seed(42),  # Consistent results
-            num_inference_steps=30,  # Reduced from default 50
-            guidance_scale=7.0  # Balanced creativity/quality
+            generator=torch.Generator(device="cuda").manual_seed(42),
+            num_inference_steps=30,
+            guidance_scale=7.0
         )
 
-        return add_watermark(result.images[0]), "✔️ Generated (Free Tier)"
+        return add_watermark(result.images[0]), "✔️ Generation successful"
     except torch.cuda.OutOfMemoryError:
-        return None, "⚠️ Out of VRAM - Try simpler prompt"
+        return None, "⚠️ Out of memory - Try a simpler prompt"
     except Exception as e:
-        return None, f"⚠️ Error: {str(e)[:100]}"
+        return None, f"⚠️ Error: {str(e)[:200]}"
 
-# ===== GRADIO UI =====
+# ===== GRADIO INTERFACE =====
 with gr.Blocks(title="SelamGPT Pro") as demo:
     gr.Markdown("""
     # 🎨 SelamGPT (DeepFloyd IF-II-L)
-    *Optimized for Free Tier - 64px Base Resolution*
+    *Free Tier Optimized - May take 2-3 minutes for first generation*
     """)
 
     with gr.Row():
         with gr.Column():
             prompt_input = gr.Textbox(
                 label="Describe your image",
-                placeholder="A traditional Ethiopian market...",
+                placeholder="A traditional Ethiopian market scene...",
                 lines=3
             )
             generate_btn = gr.Button("Generate", variant="primary")
 
             gr.Examples(
                 examples=[
-                    ["Habesha cultural dress with intricate patterns, studio lighting"],
-                    ["Lalibela rock-hewn churches at golden hour"],
-                    ["Addis Ababa futuristic skyline, cyberpunk style"]
+                    ["Habesha cultural dress with gold embroidery, studio lighting"],
+                    ["Lalibela churches at sunrise, foggy morning"],
+                    ["Futuristic Addis Ababa with Ethiopian architecture"]
                 ],
                 inputs=prompt_input
             )
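Two details in the generation hunk: building the generator with `device="cuda"` matches where the pipeline now lives after `pipe.to("cuda")`, and the fixed `manual_seed(42)` means a given prompt always produces the same image. A small sketch of how a user-selectable seed could be threaded through instead; the `make_generator` helper and its `seed` parameter are hypothetical, not part of this app:

```python
import torch

def make_generator(seed: int | None = None) -> torch.Generator:
    """Hypothetical helper: a fixed seed reproduces an image exactly,
    while None draws a fresh nondeterministic seed per call."""
    gen = torch.Generator(device="cuda")
    if seed is None:
        gen.seed()  # seed from system entropy
    else:
        gen.manual_seed(seed)
    return gen

# Usage inside generate_image, replacing the hard-coded generator:
# result = pipe(prompt=prompt, generator=make_generator(42), ...)
```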
@@ -91,7 +88,7 @@ with gr.Blocks(title="SelamGPT Pro") as demo:
             output_image = gr.Image(
                 label="Generated Image",
                 type="pil",
-                format="webp",  # Lightweight format
+                format="webp",
                 height=400
             )
             status_output = gr.Textbox(
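`format="webp"` controls how Gradio encodes the returned PIL image for the browser, and WebP is typically much smaller than PNG for photographic output. A self-contained way to sanity-check that saving locally, using a stand-in image rather than the app's output:

```python
import io
from PIL import Image

img = Image.new("RGB", (512, 512), "navy")  # stand-in for a generated image
for fmt in ("PNG", "WEBP"):
    buf = io.BytesIO()
    img.save(buf, format=fmt)
    print(f"{fmt}: {buf.tell()} bytes")
```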
@@ -108,6 +105,5 @@ with gr.Blocks(title="SelamGPT Pro") as demo:
 if __name__ == "__main__":
     demo.launch(
         server_name="0.0.0.0",
-        server_port=7860,
-        enable_queue=False  # Critical for free tier
+        server_port=7860
     )
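Dropping `enable_queue=False` tracks the current Gradio API: in Gradio 4.x, `launch()` no longer accepts an `enable_queue` argument, and queueing is configured on the Blocks object before launching. A sketch under that assumption, should request queueing be wanted back:

```python
# Assumes Gradio 4.x, where enable_queue was removed from launch();
# queueing, if desired, is configured on the Blocks object instead.
demo.queue(max_size=4)  # optional: cap pending requests on shared hardware
demo.launch(server_name="0.0.0.0", server_port=7860)
```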
 