Update app.py
app.py CHANGED

@@ -22,7 +22,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 pipeline = AutoPipelineForText2Image.from_pretrained(
     "black-forest-labs/FLUX.1-dev",
     torch_dtype=torch.bfloat16,
-    use_auth_token=HF_TOKEN
+    use_auth_token=HF_TOKEN
 ).to(device)
 pipeline.load_lora_weights("Purz/face-projection", weight_name="purz-f4c3_p40j3ct10n.safetensors")
 pipeline.enable_model_cpu_offload()  # Optimize memory usage
@@ -66,7 +66,7 @@ def generate_images(text, selected_model):
     results = []
     for i in range(3):
         modified_text = f"{prompt_prefix}{text} variation {i+1}"
-        image = pipeline(modified_text, num_inference_steps=20).images[0]
+        image = pipeline(modified_text, num_inference_steps=20).images[0]
         results.append(image)
     return results

@@ -146,5 +146,4 @@ with gr.Blocks(title="AI Tutor with Visuals") as demo:
         outputs=[output1, output2, output3]
     )

-# In Hugging Face Spaces, this variable is automatically used as the app entry point
 demo.launch()
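For context, here is a minimal sketch of how the pipeline setup touched by the first hunk could look end to end. It assumes HF_TOKEN comes from a Space secret exposed as an environment variable, and it uses the newer `token` keyword where the diff uses `use_auth_token`; neither detail is taken from this commit.

```python
import os

import torch
from diffusers import AutoPipelineForText2Image

# Assumption: HF_TOKEN is provided as a Space secret / environment variable.
# FLUX.1-dev is a gated repository, so a token is needed to download it.
HF_TOKEN = os.environ.get("HF_TOKEN")

device = "cuda" if torch.cuda.is_available() else "cpu"

pipeline = AutoPipelineForText2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN,  # newer spelling of the `use_auth_token` argument seen in the diff
).to(device)

# Apply the face-projection LoRA, then offload idle submodules to reduce VRAM usage.
pipeline.load_lora_weights(
    "Purz/face-projection",
    weight_name="purz-f4c3_p40j3ct10n.safetensors",
)
pipeline.enable_model_cpu_offload()
```

Since `enable_model_cpu_offload()` handles device placement itself, the explicit `.to(device)` before it is usually redundant; it is kept here only to mirror the diff.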
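Likewise, a hedged sketch of how the generation loop and the Gradio wiring from the later hunks might fit together, reusing the `pipeline` object from the sketch above. The widget names, the empty `prompt_prefix`, and the dropdown choices are illustrative assumptions, not part of this commit.

```python
import gradio as gr

def generate_images(text, selected_model):
    # Mirrors the loop in the diff: three prompt variations, 20 denoising steps each.
    prompt_prefix = ""  # assumption: the real prefix presumably depends on selected_model
    results = []
    for i in range(3):
        modified_text = f"{prompt_prefix}{text} variation {i+1}"
        image = pipeline(modified_text, num_inference_steps=20).images[0]
        results.append(image)
    return results

with gr.Blocks(title="AI Tutor with Visuals") as demo:
    text_input = gr.Textbox(label="Describe what to visualize")  # assumed widget
    model_choice = gr.Dropdown(["FLUX.1-dev"], label="Model")    # assumed widget
    output1, output2, output3 = gr.Image(), gr.Image(), gr.Image()
    gr.Button("Generate").click(
        fn=generate_images,
        inputs=[text_input, model_choice],
        outputs=[output1, output2, output3],
    )

demo.launch()
```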