Update app.py
app.py CHANGED
```diff
@@ -27,7 +27,7 @@ clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
 longformer_tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
 longformer_model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
 
-#
+#Load prompts for randomization
 df = pd.read_csv('prompts.csv', header=None)
 prompt_values = df.values.flatten()
 
```
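For context, `prompt_values` is the pool the app draws from when the user asks for a random prompt. A minimal sketch of that usage, assuming the same CSV layout (one prompt per cell, no header row); `random_prompt` is a hypothetical helper, not part of this commit:

```python
import random

import pandas as pd

# Same loading logic as app.py: headerless CSV, flattened to a 1-D array.
df = pd.read_csv("prompts.csv", header=None)
prompt_values = df.values.flatten()

def random_prompt() -> str:
    # Hypothetical helper: draw one prompt for a "randomize" control.
    return str(random.choice(prompt_values))
```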
```diff
@@ -42,7 +42,13 @@ base_model = "black-forest-labs/FLUX.1-dev"
 
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1,
+                                         transformer=pipe.transformer,
+                                         text_encoder=pipe.text_encoder,
+                                         tokenizer=pipe.tokenizer,
+                                         text_encoder_2=pipe.text_encoder_2,
+                                         tokenizer_2=pipe.tokenizer_2,
+                                         ).to(device)
 
 MAX_SEED = 2**32 - 1
 
```
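Review note: as committed, the new call passes `pipe.transformer`, `pipe.text_encoder`, and so on into the very `from_pretrained` call that defines `pipe`, which raises a `NameError` unless an earlier `pipe` already exists in scope. The component-sharing pattern diffusers does support is sketched below: build the base pipeline first, then construct a second pipeline that reuses its modules so the large FLUX weights are only loaded once (the `preview_pipe` name is mine):

```python
import torch
from diffusers import AutoencoderTiny, DiffusionPipeline

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"

# Base pipeline: loads the full FLUX transformer and text encoders once.
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)

# Tiny VAE for fast intermediate previews.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)

# Second pipeline that swaps in the tiny VAE while reusing every heavy
# component from `pipe`, so nothing large is downloaded or loaded twice.
preview_pipe = DiffusionPipeline.from_pretrained(
    base_model,
    torch_dtype=dtype,
    vae=taef1,
    transformer=pipe.transformer,
    text_encoder=pipe.text_encoder,
    tokenizer=pipe.tokenizer,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
).to(device)
```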
```diff
@@ -470,9 +476,10 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
 ):
     print("Image generated successfully.")  # Debugging statement
     yield img
+    return final_image
 
 @spaces.GPU(duration=75)
-def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
+def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_scale_4, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     print("run_lora function called.")  # Debugging statement
     print(f"Inputs received - Prompt: {prompt}, CFG Scale: {cfg_scale}, Steps: {steps}, Seed: {seed}, Width: {width}, Height: {height}")  # Debugging statement
 
```
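One behavioral detail worth flagging on the added `return final_image`: inside a generator, `return` does not hand the value back to an ordinary caller. It stops iteration and stashes the value on `StopIteration.value`, so a plain `for` loop silently discards it. A self-contained sketch:

```python
def frames():
    yield "preview"   # intermediate result, like `yield img` in generate_image
    return "final"    # becomes StopIteration.value, not another yielded frame

gen = frames()
print(next(gen))          # -> preview
try:
    next(gen)
except StopIteration as stop:
    # Callers that want the returned value must catch StopIteration.
    print(stop.value)     # -> final
```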
```diff
@@ -498,8 +505,7 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     # Unload previous LoRA weights
     with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
-        pipe_i2i.unload_lora_weights()
-
+        ## pipe_i2i.unload_lora_weights()
     print("Active adapters before loading new LoRAs:", pipe.get_active_adapters())
 
     # Load LoRA weights with respective scales
```
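Since `pipe_i2i` is being retired, only `pipe` still needs the unload step. A minimal sketch of that reset hygiene using diffusers' PEFT-backed LoRA API (`reset_loras` is a hypothetical helper, not in app.py):

```python
from diffusers import DiffusionPipeline

def reset_loras(pipe: DiffusionPipeline) -> None:
    # Drop every previously loaded LoRA so a stale adapter from the last
    # request cannot leak into this one.
    pipe.unload_lora_weights()
    # After a full unload the active-adapter list should be empty.
    print("Active adapters:", pipe.get_active_adapters())
```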
```diff
@@ -514,7 +520,7 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
         lora_path = lora['repo']
         weight_name = lora.get("weights")
         print(f"Lora Path: {lora_path}")
-        pipe_to_use = pipe_i2i if image_input is not None else pipe
+        ## pipe_to_use = pipe_i2i if image_input is not None else pipe
         pipe_to_use.load_lora_weights(
             lora_path,
             weight_name=weight_name if weight_name else None,
```
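Review note: this hunk comments out the `pipe_to_use` assignment but leaves the `pipe_to_use.load_lora_weights(...)` call in place, which will raise a `NameError` at runtime; presumably the call should now target `pipe` directly. A sketch of the loading loop under that assumption (`load_selected_loras` and the `adapter_name` scheme are mine; `selected_loras` mirrors app.py's list-of-dicts shape):

```python
def load_selected_loras(pipe, selected_loras: list) -> list:
    # Load each selected LoRA under a stable adapter name so set_adapters
    # can re-weight them later.
    lora_names = []
    for i, lora in enumerate(selected_loras):
        name = f"lora_{i}"
        pipe.load_lora_weights(
            lora["repo"],                     # Hub repo id, e.g. "user/my-flux-lora"
            weight_name=lora.get("weights"),  # None lets diffusers pick the file
            adapter_name=name,
        )
        lora_names.append(name)
    return lora_names
```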
```diff
@@ -523,9 +529,9 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
         )
     print("Loaded LoRAs:", lora_names)
     print("Adapter weights:", lora_weights)
-    if image_input is not None:
-        pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
-    else:
+    ## if image_input is not None:
+    ##     pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
+    ## else:
         pipe.set_adapters(lora_names, adapter_weights=lora_weights)
 
     print("Active adapters after loading new LoRAs:", pipe.get_active_adapters())
```
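With four sliders in the new signature, the names and weights passed to `set_adapters` pair up positionally: adapter i runs at weight i. A hypothetical helper showing that mapping (not in app.py):

```python
def apply_lora_scales(pipe, lora_names, scales):
    # The UI always exposes four sliders (lora_scale_1..lora_scale_4), but
    # fewer LoRAs may be selected, so trim the scale list to match.
    pipe.set_adapters(lora_names, adapter_weights=list(scales)[: len(lora_names)])
```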
```diff
@@ -539,8 +545,8 @@ def run_lora(prompt, cfg_scale, steps, selected_indices, lora_scale_1, lora_scal
     # Generate image
     try:
         if image_input is not None:
-
-            yield final_image, seed, gr.update(visible=True)
+            final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
+            yield final_image, seed, gr.update(visible=True)
         else:
             image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
             # Consume the generator to get the final image
```
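The `else` branch relies on draining the streaming generator and keeping its last yield as the final image. A self-contained sketch of that "consume the generator" idiom (`consume` is a hypothetical helper; app.py does this inline):

```python
def consume(gen):
    # Drain a streaming generator, returning the last value it yielded.
    last = None
    for item in gen:
        last = item
    return last

# Toy usage: the final yield wins.
print(consume(["preview_1", "preview_2", "final"]))  # -> final
```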