Update src/pipeline.py
src/pipeline.py  CHANGED  (+3 -8)
@@ -9,8 +9,7 @@ from diffusers import FluxTransformer2DModel, DiffusionPipeline
 import gc
 import os
 from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
-
-from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+
 
 os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
 HOME = os.environ["HOME"]
@@ -26,14 +25,10 @@ def empty_cache():
 
 def load_pipeline() -> Pipeline:
     empty_cache()
-    text_encoder = CLIPTextModel.from_pretrained(ckpt_id, subfolder="text_encoder", torch_dtype=torch.bfloat16)
-    quantize_(text_encoder, int8_weight_only())
     vae = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=torch.bfloat16)
-
+    model = FluxTransformer2DModel.from_pretrained(f"{HOME}/.cache/huggingface/hub/models--slobers--transgender/snapshots/cb99836efa0ed55856970269c42fafdaa0e44c5d", torch_dtype=torch.bfloat16, use_safetensors=False)
     text_encoder_2 = T5EncoderModel.from_pretrained("city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=torch.bfloat16)
-
-    model = FluxTransformer2DModel.from_pretrained(ckpt_id, subfolder="transformer", torch_dtype=torch.bfloat16, use_safetensors=False)
-    pipeline = DiffusionPipeline.from_pretrained(ckpt_id, text_encoder=text_encoder, transformer=model, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16)
+    pipeline = DiffusionPipeline.from_pretrained(ckpt_id, vae=vae, transformer=model, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16)
     pipeline.to("cuda")
 
     for _ in range(2):
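For reference, below is a minimal sketch of what load_pipeline() looks like after this change, reconstructed only from the two hunks above. It is not the full file: the value of ckpt_id, the Pipeline return type, the other imports, the body of empty_cache(), the warmup loop at the bottom of the hunk, and the return statement are not visible in this diff, so they are assumptions here and are marked as such in the comments.

# Hedged reconstruction of src/pipeline.py after this commit.
# Only the lines visible in the diff are certain; everything marked
# "assumption" is filled in for illustration.
import gc
import os

import torch
from diffusers import AutoencoderTiny, DiffusionPipeline, FluxTransformer2DModel
from transformers import T5EncoderModel

os.environ['PYTORCH_CUDA_ALLOC_CONF'] = "expandable_segments:True"
HOME = os.environ["HOME"]
ckpt_id = "..."  # assumption: defined earlier in src/pipeline.py; its value is not shown in the diff


def empty_cache():
    # assumption: the real helper is defined above the second hunk;
    # a typical body frees Python and CUDA memory like this.
    gc.collect()
    torch.cuda.empty_cache()


def load_pipeline():
    empty_cache()
    # Tiny VAE (taef1) loaded in bf16, now passed explicitly to the pipeline below.
    vae = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=torch.bfloat16)
    # The transformer is now loaded from a locally cached snapshot path
    # instead of ckpt_id's "transformer" subfolder.
    model = FluxTransformer2DModel.from_pretrained(
        f"{HOME}/.cache/huggingface/hub/models--slobers--transgender/snapshots/"
        "cb99836efa0ed55856970269c42fafdaa0e44c5d",
        torch_dtype=torch.bfloat16,
        use_safetensors=False,
    )
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=torch.bfloat16
    )
    # The CLIP text-encoder override and the torchao int8 quantization step
    # were removed in this commit; the default text encoder from ckpt_id is used.
    pipeline = DiffusionPipeline.from_pretrained(
        ckpt_id,
        vae=vae,
        transformer=model,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")
    return pipeline  # assumption: the warmup loop (for _ in range(2):) and return are outside the diff

As a usage sketch, the returned object behaves like any Flux DiffusionPipeline (e.g. pipe = load_pipeline(); pipe(prompt, ...)); the for _ in range(2): loop visible at the end of the hunk presumably runs a couple of warmup calls, but its body is not part of this diff.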