gaur3009 committed
Commit d4cf022 · verified · 1 Parent(s): d0ae4b7

Update app.py

Files changed (1)
  1. app.py +1 -205
app.py CHANGED
@@ -201,208 +201,4 @@ with gr.Blocks(css=css) as demo:
         outputs=[front_result, back_result]
     )
 
-    demo.queue().launch()
-import gradio as gr
-import numpy as np
-import random
-from diffusers import DiffusionPipeline
-import torch
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-if torch.cuda.is_available():
-    torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-    pipe.enable_xformers_memory_efficient_attention()
-    pipe = pipe.to(device)
-else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-    pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-def infer(prompt_part1, color, dress_type, front_design, back_design, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-    front_prompt = f"front view of {prompt_part1} {color} colored plain {dress_type} with {front_design} design, {prompt_part5}"
-    back_prompt = f"back view of {prompt_part1} {color} colored plain {dress_type} with {back_design} design, {prompt_part5}"
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    front_image = pipe(
-        prompt=front_prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator
-    ).images[0]
-
-    back_image = pipe(
-        prompt=back_prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator
-    ).images[0]
-
-    return front_image, back_image
-
-examples = [
-    ["red", "t-shirt", "yellow stripes", "polka dots"],
-    ["blue", "hoodie", "minimalist", "abstract art"],
-    ["red", "sweat shirt", "geometric design", "plain"],
-]
-
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 520px;
-}
-"""
-
-if torch.cuda.is_available():
-    power_device = "GPU"
-else:
-    power_device = "CPU"
-
-with gr.Blocks(css=css) as demo:
-
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""
-        # Text-to-Image Gradio Template
-        Currently running on {power_device}.
-        """)
-
-        with gr.Row():
-
-            prompt_part1 = gr.Textbox(
-                value="a single",
-                label="Prompt Part 1",
-                show_label=False,
-                interactive=False,
-                container=False,
-                elem_id="prompt_part1",
-                visible=False,
-            )
-
-            prompt_part2 = gr.Textbox(
-                label="color",
-                show_label=False,
-                max_lines=1,
-                placeholder="color (e.g., color category)",
-                container=False,
-            )
-
-            prompt_part3 = gr.Textbox(
-                label="dress_type",
-                show_label=False,
-                max_lines=1,
-                placeholder="dress_type (e.g., t-shirt, sweatshirt, shirt, hoodie)",
-                container=False,
-            )
-
-            prompt_part4_front = gr.Textbox(
-                label="front design",
-                show_label=False,
-                max_lines=1,
-                placeholder="front design",
-                container=False,
-            )
-
-            prompt_part4_back = gr.Textbox(
-                label="back design",
-                show_label=False,
-                max_lines=1,
-                placeholder="back design",
-                container=False,
-            )
-
-            prompt_part5 = gr.Textbox(
-                value="hanging on the plain wall",
-                label="Prompt Part 5",
-                show_label=False,
-                interactive=False,
-                container=False,
-                elem_id="prompt_part5",
-                visible=False,
-            )
-
-
-            run_button = gr.Button("Run", scale=0)
-
-        front_result = gr.Image(label="Front View Result", show_label=False)
-        back_result = gr.Image(label="Back View Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Textbox(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-            with gr.Row():
-
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=12,
-                    step=1,
-                    value=2,
-                )
-
-        gr.Examples(
-            examples=examples,
-            inputs=[prompt_part2, prompt_part3, prompt_part4_front, prompt_part4_back]
-        )
-
-    run_button.click(
-        fn=infer,
-        inputs=[prompt_part1, prompt_part2, prompt_part3, prompt_part4_front, prompt_part4_back, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs=[front_result, back_result]
-    )
-
-    demo.queue().launch()
+    demo.queue().launch()
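After this hunk, app.py ends with a single demo.queue().launch() call instead of a second pasted copy of the whole app. If the file is edited again, one optional way to keep that call from being duplicated, and to let app.py be imported without starting a server, is a __main__ guard. The lines below are a sketch only, not part of this commit; they assume demo is the gr.Blocks object built earlier in app.py.

    # Sketch, not part of this commit: launch only when app.py is run directly,
    # so importing it (e.g. from a test) does not start the Gradio server and
    # the launch call appears exactly once, at the bottom of the file.
    if __name__ == "__main__":
        demo.queue().launch()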