test
app.py CHANGED
@@ -207,24 +207,26 @@ loras = [
#add new
]

- 
- from diffusers import AutoencoderKL
- 
- # Define dtype before using it
+ # Initialize the base model
dtype = torch.bfloat16
- 
- good_vae = AutoencoderKL.from_pretrained(
-     "black-forest-labs/FLUX.1-dev",
-     subfolder="vae",
-     torch_dtype=dtype,
-     use_auth_token=True
- ).to("cuda" if torch.cuda.is_available() else "cpu")
- 
- 
- 
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"

+ taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+ good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
+ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
+     base_model,
+     vae=good_vae,
+     transformer=pipe.transformer,
+     text_encoder=pipe.text_encoder,
+     tokenizer=pipe.tokenizer,
+     text_encoder_2=pipe.text_encoder_2,
+     tokenizer_2=pipe.tokenizer_2,
+     torch_dtype=dtype
+ )
+ 
+ 
#TAEF1 is a very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE. TAEF1 is useful for real-time previewing of the FLUX.1 generation process.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
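The added lines construct two pipelines that share a single FLUX.1-dev transformer and text encoders: pipe decodes latents with the tiny TAEF1 autoencoder for fast, preview-quality output, while pipe_i2i reuses the same components but decodes through the full-quality VAE (good_vae). The sketch below is a minimal usage example and is not part of this commit; it assumes the objects defined above (pipe, pipe_i2i, device) exist and that the standard diffusers text-to-image and image-to-image call signatures apply. The prompt, step count, guidance scale, and strength are illustrative values only.

# Usage sketch (assumption, not part of the commit): pipe, pipe_i2i, and device come from the code above.
import torch

prompt = "a photo of a red fox in the snow"     # illustrative prompt
generator = torch.Generator(device=device).manual_seed(42)

# Text-to-image with the TAEF1-backed pipeline (fast, preview-quality decode).
preview = pipe(
    prompt=prompt,
    num_inference_steps=28,    # illustrative step count
    guidance_scale=3.5,        # illustrative guidance value
    width=1024,
    height=1024,
    generator=generator,
).images[0]

# Image-to-image with pipe_i2i, which decodes through the full-quality VAE.
refined = pipe_i2i(
    prompt=prompt,
    image=preview,
    strength=0.75,             # illustrative denoising strength
    num_inference_steps=28,
    guidance_scale=3.5,
    generator=generator,
).images[0]

refined.save("output.png")

Passing pipe.transformer, the text encoders, and the tokenizers into AutoPipelineForImage2Image keeps only one copy of the large FLUX.1 weights in memory; the two pipelines differ only in which VAE decodes the final latents.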