snackshell committed
Commit 218c1e6 · verified · 1 Parent(s): a1fd289

Update app.py

Files changed (1)
  1. app.py +21 -38
app.py CHANGED
@@ -2,10 +2,8 @@ import gradio as gr
 import numpy as np
 import random
 from PIL import Image, ImageDraw, ImageFont
-import io
-
-from diffusers import DiffusionPipeline
 import torch
+from diffusers import DiffusionPipeline
 
 # ===== CONFIG =====
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -15,7 +13,7 @@ pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype,
 pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
+IMAGE_SIZE = 1024
 WATERMARK_TEXT = "SelamGPT"
 
 # ===== WATERMARK FUNCTION =====
@@ -33,14 +31,12 @@ def add_watermark(image):
     draw.text((x, y), WATERMARK_TEXT, font=font, fill=(255, 255, 255))
     return image
 
-# ===== IMAGE GENERATION FUNCTION =====
+# ===== INFERENCE FUNCTION =====
 def generate(
     prompt,
     negative_prompt,
     seed,
     randomize_seed,
-    width,
-    height,
     guidance_scale,
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
@@ -55,8 +51,8 @@ def generate(
     result = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
-        width=width,
-        height=height,
+        width=IMAGE_SIZE,
+        height=IMAGE_SIZE,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         generator=generator,
@@ -67,49 +63,38 @@ def generate(
 
 # ===== EXAMPLES =====
 examples = [
-    "A futuristic Ethiopian city with flying cars",
-    "An ancient Aksumite queen in a high-tech palace, digital painting",
-    "A cyberpunk Habesha coffee ceremony on Mars",
+    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+    "An astronaut riding a green horse",
+    "A delicious ceviche cheesecake slice",
 ]
 
 # ===== INTERFACE =====
-theme = gr.themes.Default(
-    primary_hue="cyan",
-    secondary_hue="amber",
-    font=[gr.themes.GoogleFont("Poppins"), "sans-serif"]
-)
+css = "#container { max-width: 700px; margin: auto; }"
 
-css = "#container { max-width: 800px; margin: 0 auto; }"
-
-with gr.Blocks(css=css, theme=theme, title="SelamGPT Turbo Image Generator") as demo:
+with gr.Blocks(css=css, title="SelamGPT Turbo Generator") as demo:
     with gr.Column(elem_id="container"):
-        gr.Markdown("# 🎨 SelamGPT Turbo Image Generator\n*Powered by SDXL-Turbo (Fast & Creative)*")
+        gr.Markdown("# 🖼️ SelamGPT Image Generator")
 
         with gr.Row():
             prompt = gr.Textbox(
                 label="Prompt",
                 show_label=False,
-                placeholder="Describe the image...",
-                lines=2,
+                placeholder="Enter your prompt",
+                lines=1,
                 scale=3
             )
             generate_btn = gr.Button("Generate", variant="primary")
 
-        image_output = gr.Image(label="Generated Image", type="pil", format="png", height=512)
-        seed_output = gr.Textbox(label="Seed Used", interactive=False)
+        output_image = gr.Image(label="Generated Image", type="pil", format="png")
+        seed_display = gr.Textbox(label="Seed Used", interactive=False)
 
         with gr.Accordion("⚙️ Advanced Settings", open=False):
-            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Things to avoid (optional)", max_lines=1)
+            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What to avoid (optional)", max_lines=1)
             randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-            seed = gr.Slider(0, MAX_SEED, label="Seed", step=1, value=0)
-
-            with gr.Row():
-                width = gr.Slider(256, MAX_IMAGE_SIZE, step=32, label="Width", value=1024)
-                height = gr.Slider(256, MAX_IMAGE_SIZE, step=32, label="Height", value=1024)
+            seed = gr.Slider(0, MAX_SEED, step=1, label="Seed", value=0)
 
-            with gr.Row():
-                guidance_scale = gr.Slider(0.0, 10.0, step=0.1, label="Guidance Scale", value=0.0)
-                num_inference_steps = gr.Slider(1, 50, step=1, label="Inference Steps", value=2)
+            guidance_scale = gr.Slider(0.0, 10.0, step=0.1, label="Guidance Scale", value=0.0)
+            num_inference_steps = gr.Slider(1, 50, step=1, label="Inference Steps", value=2)
 
         gr.Examples(examples=examples, inputs=[prompt])
 
@@ -120,13 +105,11 @@ with gr.Blocks(css=css, theme=theme, title="SelamGPT Turbo Image Generator") as
             negative_prompt,
             seed,
             randomize_seed,
-            width,
-            height,
             guidance_scale,
             num_inference_steps
         ],
-        outputs=[image_output, seed_output]
+        outputs=[output_image, seed_display]
     )
 
 if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+    demo.launch()
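
For reference, a minimal standalone sketch of the pipeline call pattern app.py uses after this commit: fixed 1024×1024 output with turbo-style settings (guidance_scale=0.0, two inference steps). The diff truncates the actual model_repo_id, so the "stabilityai/sdxl-turbo" checkpoint, the example prompt, and the seed below are assumptions, not part of the commit.

# Sketch only: fixed-size SDXL-Turbo-style generation, mirroring the updated generate().
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# model id is an assumption; app.py loads its own model_repo_id
pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch_dtype)
pipe.to(device)

# Seeded generator, analogous to the seed handling in generate()
generator = torch.Generator(device=device).manual_seed(42)

image = pipe(
    prompt="An astronaut riding a green horse",
    negative_prompt="",
    width=1024,               # IMAGE_SIZE in the updated app.py
    height=1024,
    guidance_scale=0.0,       # turbo models run without classifier-free guidance
    num_inference_steps=2,
    generator=generator,
).images[0]
image.save("output.png")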