Keltezaa committed on
Commit
759ed4d
·
verified ·
1 Parent(s): 7971428

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -14
app.py CHANGED
@@ -298,7 +298,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
298
  pipe.to("cuda")
299
  generator = torch.Generator(device="cuda").manual_seed(seed)
300
  with calculateDuration("Generating image"):
301
- # Generate image
302
  for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
303
  prompt=prompt_mash,
304
  num_inference_steps=steps,
@@ -311,7 +311,6 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
311
  good_vae=good_vae,
312
  ):
313
  yield img
314
- return img
315
 
316
  @spaces.GPU(duration=75)
317
  def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
@@ -332,11 +331,11 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
332
  appends.append(trigger_word)
333
  prompt_mash = " ".join(prepends + [prompt] + appends)
334
  print("Prompt Mash: ", prompt_mash)
 
335
  # Unload previous LoRA weights
336
  with calculateDuration("Unloading LoRA"):
337
  pipe.unload_lora_weights()
338
 
339
-
340
  print(pipe.get_active_adapters())
341
 
342
  lora_names = []
@@ -369,13 +368,13 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
369
  image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
370
 
371
  step_counter = 0
 
372
  for image in image_generator:
373
  step_counter += 1
374
- final_image = image
375
  progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
376
- yield image, seed, gr.update(value=progress_bar, visible=True)
377
-
378
-
379
  run_lora.zerogpu = True
380
 
381
  def get_huggingface_safetensors(link):
@@ -563,13 +562,8 @@ with gr.Blocks(css=css, delete_cache=(240, 240)) as app:
563
  interactive=False
564
  )
565
  with gr.Column():
566
- progress_bar = gr.Markdown(elem_id="progress", visible=False)
567
- result = gr.Image(label="Image 1", interactive=False, show_share_button=False)
568
- result = gr.Image(label="Image 2", interactive=False, show_share_button=False)
569
- result = gr.Image(label="Image 3", interactive=False, show_share_button=False)
570
- result = gr.Image(label="Image 4", interactive=False, show_share_button=False)
571
- # with gr.Accordion("History", open=False):
572
- # history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
573
 
574
  gallery.select(
575
  update_selection,
 
298
  pipe.to("cuda")
299
  generator = torch.Generator(device="cuda").manual_seed(seed)
300
  with calculateDuration("Generating image"):
301
+ # Generate images
302
  for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
303
  prompt=prompt_mash,
304
  num_inference_steps=steps,
 
311
  good_vae=good_vae,
312
  ):
313
  yield img
 
314
 
315
  @spaces.GPU(duration=75)
316
  def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
 
331
  appends.append(trigger_word)
332
  prompt_mash = " ".join(prepends + [prompt] + appends)
333
  print("Prompt Mash: ", prompt_mash)
334
+
335
  # Unload previous LoRA weights
336
  with calculateDuration("Unloading LoRA"):
337
  pipe.unload_lora_weights()
338
 
 
339
  print(pipe.get_active_adapters())
340
 
341
  lora_names = []
 
368
  image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
369
 
370
  step_counter = 0
371
+ images = []
372
  for image in image_generator:
373
  step_counter += 1
374
+ images.append(image)
375
  progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
376
+ yield images, seed, gr.update(value=progress_bar, visible=True)
377
+
 
378
  run_lora.zerogpu = True
379
 
380
  def get_huggingface_safetensors(link):
 
562
  interactive=False
563
  )
564
  with gr.Column():
565
+ progress_bar = gr.Markdown(elem_id="progress", visible=False)
566
+ results = [gr.Image(label=f"Image {i+1}", interactive=False, show_share_button=False) for i in range(4)]
 
 
 
 
 
567
 
568
  gallery.select(
569
  update_selection,