ameerazam08 committed
Commit b57786a · verified · 1 Parent(s): 428167d

SDXL 4 steps


@jiaxiangc
Now fixed, please check this.
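For context, "SDXL 4 steps" refers to few-step SDXL generation: the diff below defaults `num_inference_steps` to 4 and caps the guidance slider at 1 with a default of 0, which is the guidance-free sampling that distilled few-step checkpoints expect. A minimal sketch of that inference pattern, assuming the ByteDance SDXL-Lightning 4-step LoRA (the repo id and weight filename are illustrative assumptions, not taken from this diff):

```python
# Minimal sketch of 4-step SDXL inference with a distilled LoRA.
# ASSUMPTION: the SDXL-Lightning repo id and weight filename below are
# illustrative; this commit's app loads its own LoRA elsewhere in app.py.
import torch
from diffusers import EulerDiscreteScheduler, StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipe.load_lora_weights(
    "ByteDance/SDXL-Lightning",
    weight_name="sdxl_lightning_4step_lora.safetensors",
)
pipe.fuse_lora()

# Few-step distilled checkpoints are typically sampled with trailing
# timesteps and guidance disabled, matching the UI defaults wired in below.
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)
image = pipe("A girl smiling", num_inference_steps=4, guidance_scale=0).images[0]
```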

Files changed (1)
app.py +134 -28
app.py CHANGED
@@ -1,14 +1,13 @@
-
 import os
 
-os.system("pip install -U peft")
+# os.system("pip install -U peft")
 import random
 
 import gradio as gr
 import numpy as np
 import PIL.Image
 
-import spaces
+# import spaces
 import torch
 from diffusers import (
     StableDiffusionXLPipeline,
@@ -30,6 +29,7 @@ if not torch.cuda.is_available():
         "\n<h1>Running on CPU 🥶 This demo does not work on CPU.</a> instead</h1>"
     )
 
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
@@ -59,17 +59,28 @@ pipe.load_lora_weights(
 )
 
 
-
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    return seed
+
+
+# @spaces.GPU(enable_queue=True)
+def generate(
+    prompt: str,
+    negative_prompt: str = "",
+    prompt_2: str = "",
+    negative_prompt_2: str = "",
+    use_negative_prompt: bool = False,
+    use_prompt_2: bool = False,
+    use_negative_prompt_2: bool = False,
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
-    guidance_scale: float = 0,
-    num_inference_steps: int = 4,
+    guidance_scale_base: float = 5.0,
+    num_inference_steps_base: int = 4,
     progress=gr.Progress(track_tqdm=True),
-) -> PIL.Image.Image:
-
+) -> PIL.Image.Image:
     print(f'** Generating image for: "{prompt}" **')
     generator = torch.Generator().manual_seed(seed)
 
@@ -86,8 +97,8 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         negative_prompt_2=negative_prompt_2,
         width=width,
         height=height,
-        num_inference_steps=num_inference_steps,
-        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps_base,
+        guidance_scale=guidance_scale_base,
 
         output_type="pil",
         generator=generator,
@@ -102,8 +113,8 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         negative_prompt_2=negative_prompt_2,
         width=width,
         height=height,
-        num_inference_steps=num_inference_steps,
-        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps_base,
+        guidance_scale=guidance_scale_base,
         output_type="pil",
         generator=generator,
     ).images[0]
@@ -116,7 +127,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
 
 examples = [
     "A girl smiling",
-    "A boy smiling",
+    "A realistic photograph of an astronaut in a jungle, cold color palette, detailed, 8k",
 
 ]
 
@@ -130,33 +141,78 @@ theme = gr.themes.Base(
 )
 with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
     gr.Markdown(DESCRIPTION)
-    # result = gr.Gallery(label="Right is Res-Adapt-LORA and Left is Base"),
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
+    with gr.Group():
+        prompt = gr.Text(
+            label="Prompt",
+            show_label=False,
+            max_lines=1,
+            container=False,
+            placeholder="Enter your prompt",
+        )
+        run_button = gr.Button("Generate")
+    # result = gr.Gallery(label="Left is Base and Right is Lora"),
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
            use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
-            use_negative_prompt_2 = gr.Checkbox(
-                label="Use negative prompt 2", value=False
-            )
+            use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
         negative_prompt = gr.Text(
             label="Negative prompt",
             max_lines=1,
-            placeholder="Enter your prompt",
-            visible=True,
+            placeholder="Enter a negative prompt",
+            visible=False,
         )
         prompt_2 = gr.Text(
-            value=512,
+            label="Prompt 2",
+            max_lines=1,
+            placeholder="Enter your prompt",
+            visible=False,
+        )
+        negative_prompt_2 = gr.Text(
+            label="Negative prompt 2",
+            max_lines=1,
+            placeholder="Enter a negative prompt",
+            visible=False,
+        )
+
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=MAX_SEED,
+            step=1,
+            value=0,
+        )
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+        with gr.Row():
+            width = gr.Slider(
+                label="Width",
+                minimum=256,
+                maximum=MAX_IMAGE_SIZE,
+                step=32,
+                value=1024,
+            )
+            height = gr.Slider(
+                label="Height",
+                minimum=256,
+                maximum=MAX_IMAGE_SIZE,
+                step=32,
+                value=1024,
             )
         with gr.Row():
-            guidance_scale = gr.Slider(
-                label="Guidance scale",
+            guidance_scale_base = gr.Slider(
+                label="Guidance scale for base",
                 minimum=0,
-                maximum=20,
+                maximum=1,
                 step=0.1,
                 value=0,
             )
-            num_inference_steps = gr.Slider(
-                label="Number of inference steps",
+            num_inference_steps_base = gr.Slider(
+                label="Number of inference steps for base",
                 minimum=1,
                 maximum=50,
                 step=1,
@@ -164,13 +220,63 @@ with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
     )
     gr.Examples(
         examples=examples,
+        inputs=prompt,
+        outputs=None,
+        fn=generate,
+        cache_examples=CACHE_EXAMPLES,
+    )
+
+    use_negative_prompt.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_negative_prompt,
+        outputs=negative_prompt,
+        queue=False,
+        api_name=False,
+    )
+    use_prompt_2.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_prompt_2,
+        outputs=prompt_2,
+        queue=False,
+        api_name=False,
+    )
+    use_negative_prompt_2.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_negative_prompt_2,
+        outputs=negative_prompt_2,
+        queue=False,
+        api_name=False,
+    )
+    gr.on(
+        triggers=[
+            prompt.submit,
+            negative_prompt.submit,
+            prompt_2.submit,
+            negative_prompt_2.submit,
+            run_button.click,
+        ],
+        fn=randomize_seed_fn,
+        inputs=[seed, randomize_seed],
+        outputs=seed,
+        queue=False,
+        api_name=False,
+    ).then(
+        fn=generate,
+        inputs=[
+            prompt,
+            negative_prompt,
+            prompt_2,
+            negative_prompt_2,
+            use_negative_prompt,
+            use_prompt_2,
+            use_negative_prompt_2,
             seed,
             width,
             height,
-        guidance_scale,
-        num_inference_steps,
+            guidance_scale_base,
+            num_inference_steps_base,
         ],
-    outputs=gr.Gallery(label="Left is ResAdapter and Right is Base"),
+        outputs=gr.Gallery(label="Right is Base and Left is ResAdapt with SDXL-ByteDance"),
         api_name="run",
     )
 
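The event wiring at the bottom of the diff uses two standard Gradio patterns: checkbox `.change()` handlers that toggle textbox visibility with `gr.update(visible=...)`, and a `gr.on(...).then(...)` chain so `randomize_seed_fn` writes the seed back to its slider before `generate` reads it. A self-contained sketch of the same wiring, assuming Gradio 4.x (the component set and the stand-in generate function are illustrative, not this app's code):

```python
# Minimal sketch of the two Gradio wiring patterns this commit adds:
# (1) a checkbox .change() toggles a textbox's visibility via gr.update,
# (2) gr.on(...).then(...) re-randomizes the seed before generating.
# Assumes Gradio 4.x; components and fake_generate are illustrative only.
import random

import gradio as gr

MAX_SEED = 2**31 - 1


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Replace the seed only when the checkbox is ticked.
    return random.randint(0, MAX_SEED) if randomize_seed else seed


def fake_generate(prompt: str, negative_prompt: str, seed: int) -> str:
    # Stand-in for the real diffusion call.
    return f"would generate {prompt!r} (neg: {negative_prompt!r}) with seed {seed}"


with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
    negative_prompt = gr.Text(label="Negative prompt", visible=False)
    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
    run_button = gr.Button("Generate")
    result = gr.Text(label="Result")

    # Pattern 1: show/hide the negative prompt as the checkbox flips.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        queue=False,
    )

    # Pattern 2: update the seed first, then generate with the updated value.
    gr.on(
        triggers=[prompt.submit, run_button.click],
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
    ).then(
        fn=fake_generate,
        inputs=[prompt, negative_prompt, seed],
        outputs=result,
    )

demo.launch()
```

The `.then()` chaining matters here: if `generate` were bound directly to the click trigger, it could read the stale slider value before the randomized seed had been written back.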
282