Update src/pipeline.py
src/pipeline.py  +3 -4
@@ -593,14 +593,13 @@ def load_pipeline() -> Pipeline:
 dtype, device = torch.bfloat16, "cuda"

 text_encoder_2 = T5EncoderModel.from_pretrained(
-    "
+    "city96/t5-v1_1-xxl-encoder-bf16", revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86", torch_dtype=torch.bfloat16
 ).to(memory_format=torch.channels_last)
-


-vae = AutoencoderTiny.from_pretrained("
+vae = AutoencoderTiny.from_pretrained("RobertML/FLUX.1-schnell-vae_int8", revision="2e4fdf1337ed12ed202e23e6f25269aa7d6d3f2a", torch_dtype=dtype)

-path = os.path.join(HF_HUB_CACHE, "models--
+path = os.path.join(HF_HUB_CACHE, "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a")
 generator = torch.Generator(device=device)
 model = FluxTransformer2DModel.from_pretrained(path, torch_dtype=dtype, use_safetensors=False, generator= generator).to(memory_format=torch.channels_last)
 torch.backends.cudnn.benchmark = True
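
For context, a minimal sketch of how the components loaded in this hunk could be assembled into a complete pipeline. This is an assumption, not part of the commit: the base checkpoint id ("black-forest-labs/FLUX.1-schnell") and the final FluxPipeline.from_pretrained assembly step do not appear in the diff; only the three component loads are taken from it.

# Sketch only (assumed assembly); component loads mirror the diff above.
import os

import torch
from diffusers import AutoencoderTiny, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from transformers import T5EncoderModel

dtype, device = torch.bfloat16, "cuda"

# bf16 T5 text encoder, pinned to a specific revision (from the diff).
text_encoder_2 = T5EncoderModel.from_pretrained(
    "city96/t5-v1_1-xxl-encoder-bf16",
    revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
    torch_dtype=torch.bfloat16,
).to(memory_format=torch.channels_last)

# Tiny int8 VAE for FLUX.1-schnell (from the diff).
vae = AutoencoderTiny.from_pretrained(
    "RobertML/FLUX.1-schnell-vae_int8",
    revision="2e4fdf1337ed12ed202e23e6f25269aa7d6d3f2a",
    torch_dtype=dtype,
)

# int8 weight-only transformer, loaded from a local hub-cache snapshot (from the diff).
path = os.path.join(
    HF_HUB_CACHE,
    "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a",
)
transformer = FluxTransformer2DModel.from_pretrained(
    path, torch_dtype=dtype, use_safetensors=False
).to(memory_format=torch.channels_last)

torch.backends.cudnn.benchmark = True

# Assumed final step: pass the preloaded components so the base checkpoint's
# text_encoder_2 / vae / transformer are not downloaded or loaded again.
pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",  # assumed base repo, not in the diff
    text_encoder_2=text_encoder_2,
    vae=vae,
    transformer=transformer,
    torch_dtype=dtype,
).to(device)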