Keltezaa committed
Commit 00d1c09 · verified · 1 Parent(s): f243507

Update app.py

Files changed (1): app.py +32 -30
app.py CHANGED
@@ -294,23 +294,38 @@ def remove_custom_lora(selected_indices, current_loras, gallery):
     )
 
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
-    print("Generating image...")
+    print("Generating images...")
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
-    with calculateDuration("Generating image"):
-        # Generate images
-        for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
-            prompt=prompt_mash,
-            num_inference_steps=steps,
-            guidance_scale=cfg_scale,
-            width=width,
-            height=height,
-            generator=generator,
-            joint_attention_kwargs={"scale": 1.0},
-            output_type="pil",
-            good_vae=good_vae,
-        ):
-            yield img
+    images = []
+
+    with calculateDuration("Generating images"):
+        try:
+            for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+                prompt=prompt_mash,
+                num_inference_steps=steps,
+                guidance_scale=cfg_scale,
+                width=width,
+                height=height,
+                generator=generator,
+                joint_attention_kwargs={"scale": 1.0},
+                output_type="pil",
+                good_vae=good_vae,
+            ):
+                images.append(img)
+                progress(percent=len(images) / 4 * 100)  # Adjust progress for 4 images
+                if len(images) == 4:  # Collect exactly 4 images
+                    break
+        except Exception as e:
+            print(f"Error during image generation: {e}")
+            raise
+
+    if len(images) < 4:
+        print("Fewer than 4 images generated. Padding with None.")
+        while len(images) < 4:
+            images.append(None)
+
+    return images
 
 @spaces.GPU(duration=75)
 def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
@@ -365,22 +380,9 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
-
-    step_counter = 0
-    images = []
-    for image in image_generator:
-        step_counter += 1
-        images.append(image)
-        progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
-        if len(images) == 4:  # Ensure four images are collected
-            break
-
-    # Pad to ensure exactly 4 images are returned
-    while len(images) < 4:
-        images.append(None)
+    images = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
 
-    return *images, seed, gr.update(value=progress_bar, visible=True)
+    return *images[:4], seed, gr.update(value="", visible=False)
 
 run_lora.zerogpu = True
 
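Taken together, the two hunks change `generate_image` from a generator that yielded intermediate images into a plain function returning a list padded with `None` up to four entries, which `run_lora` then unpacks into its first four outputs via `*images[:4]`. A minimal sketch of that contract, using assumed stand-in values rather than real pipeline output:

```python
def pad_to_four(images):
    # Mirror of the padding logic in the diff: always expose exactly 4 slots.
    images = list(images)[:4]
    return images + [None] * (4 - len(images))

# Star-unpacking flattens the four slots into the returned tuple, so a handler
# wired to (img1, img2, img3, img4, seed) always receives five values.
outputs = (*pad_to_four(["img_a", "img_b"]), 1234)
assert outputs == ("img_a", "img_b", None, None, 1234)
```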