Update app.py
app.py
CHANGED
@@ -311,23 +311,6 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
             good_vae=good_vae,
         ):
             yield img
-
-#def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
-# pipe_i2i.to("cuda")
-# generator = torch.Generator(device="cuda").manual_seed(seed)
-# image_input = load_image(image_input_path)
-# final_image = pipe_i2i(
-# prompt=prompt_mash,
-# image=image_input,
-# strength=image_strength,
-# num_inference_steps=steps,
-# guidance_scale=cfg_scale,
-# width=width,
-# height=height,
-# generator=generator,
-# joint_attention_kwargs={"scale": 1.0},
-# output_type="pil",
-# ).images[0]
     return img
 
 @spaces.GPU(duration=75)
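For reference, the block deleted above was a commented-out image-to-image path. Below is a minimal sketch of what that dead code implemented; the `AutoPipelineForImage2Image` setup, the base model id, and the `load_image` import are assumptions for illustration and are not part of this commit.

```python
# Sketch only: what the deleted commented-out block implemented.
# Assumptions (not in this commit): pipe_i2i is built with
# AutoPipelineForImage2Image from an assumed FLUX.1-dev base model,
# and load_image comes from diffusers.utils.
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # assumed base model
    torch_dtype=torch.bfloat16,
)

def generate_image_to_image(prompt_mash, image_input_path, image_strength,
                            steps, cfg_scale, width, height, seed):
    pipe_i2i.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image_input = load_image(image_input_path)
    final_image = pipe_i2i(
        prompt=prompt_mash,
        image=image_input,
        strength=image_strength,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": 1.0},
        output_type="pil",
    ).images[0]
    return final_image
```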
@@ -352,10 +335,10 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     # Unload previous LoRA weights
     with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
-
+
     print(pipe.get_active_adapters())
-
+
     lora_names = []
     lora_weights = []
     with calculateDuration("Loading LoRA weights"):
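The code around this hunk follows the standard diffusers multi-LoRA flow: `unload_lora_weights`, then `load_lora_weights(..., adapter_name=...)` per LoRA, then `set_adapters` with per-adapter scales. A minimal sketch of that pattern is below; the model id, repo ids, adapter names, and weights are placeholders, not values from app.py, and `pipe` is assumed to be a Flux pipeline with the PEFT backend available.

```python
# Minimal sketch of the diffusers multi-LoRA flow used around this hunk.
# Model id, repo ids, and weights are placeholders, not values from app.py.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # assumed base pipeline
    torch_dtype=torch.bfloat16,
)

pipe.unload_lora_weights()           # drop adapters from the previous request
print(pipe.get_active_adapters())    # should now report no active adapters

lora_names, lora_weights = [], []
for repo_id, weight in [("user/lora-one", 0.9), ("user/lora-two", 1.1)]:  # placeholders
    name = repo_id.split("/")[-1]
    pipe.load_lora_weights(
        repo_id,
        low_cpu_mem_usage=True,      # same flag the app passes
        adapter_name=name,           # register each LoRA under its own name
    )
    lora_names.append(name)
    lora_weights.append(weight)

# Activate all loaded adapters at once with per-adapter scales.
pipe.set_adapters(lora_names, adapter_weights=lora_weights)
```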
@@ -373,35 +356,25 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
                     low_cpu_mem_usage=True,
                     adapter_name=lora_name
                 )
-
-    # else: pipe = pipe_to_use
+
     print("Loaded LoRAs:", lora_names)
     print("Adapter weights:", lora_weights)
-
-    # pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
-    # else:
+
     pipe.set_adapters(lora_names, adapter_weights=lora_weights)
-
-    # Set random seed for reproducibility
+
     with calculateDuration("Randomizing seed"):
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
 
-    # Generate image
-    # if image_input is not None:
-    #     final_image = generate_image_to_image(prompt_mash, steps, cfg_scale, width, height, seed)
-    #     yield final_image, seed, gr.update(visible=False)
-    # else:
     image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
-
-    # final_image = None
+
     step_counter = 0
     for image in image_generator:
         step_counter += 1
         final_image = image
         progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
         yield image, seed, gr.update(value=progress_bar, visible=True)
-
+
 
 run_lora.zerogpu = True
 
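The loop retained at the end of this hunk consumes intermediate images from a generator and yields a Gradio update carrying an HTML progress bar. Below is a standalone sketch of that streaming pattern; `fake_generate` and `run` are stand-ins for `generate_image(...)` and `run_lora(...)` and are not part of app.py.

```python
# Sketch of the streaming pattern run_lora keeps: consume a generator of
# intermediate images and yield a Gradio HTML progress bar alongside each one.
# fake_generate stands in for generate_image(...) and is not from app.py.
import gradio as gr

def fake_generate(steps):
    for i in range(steps):
        yield f"image-at-step-{i + 1}"   # placeholder for a PIL image

def run(steps=4, seed=42):
    step_counter = 0
    for image in fake_generate(steps):
        step_counter += 1
        progress_bar = (
            f'<div class="progress-container">'
            f'<div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div>'
            f'</div>'
        )
        # Each yield updates the image output, echoes the seed, and refreshes
        # the visible progress bar, mirroring the loop at the end of run_lora.
        yield image, seed, gr.update(value=progress_bar, visible=True)

for out in run():
    print(out)
```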