Update src/pipeline.py
src/pipeline.py (changed: +2 -2)
@@ -13,7 +13,7 @@ import diffusers
 import gc
 from diffusers import FluxTransformer2DModel, DiffusionPipeline
 from torchao.quantization import quantize_,int8_weight_only
-
+HOME = os.environ["HOME"]
 os.environ["TOKENIZERS_PARALLELISM"] = "True"
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 Pipeline = None
@@ -30,7 +30,7 @@ def load_pipeline() -> Pipeline:
     dtype, device = torch.bfloat16, "cuda"
     vae = AutoencoderTiny.from_pretrained("aifeifei798/taef1", torch_dtype=dtype)
     quantize_(vae, int8_weight_only())
-    model = FluxTransformer2DModel.from_pretrained("slobers/
+    model = FluxTransformer2DModel.from_pretrained(f"{HOME}/.cache/huggingface/hub/models--slobers--transgender/snapshots/cb99836efa0ed55856970269c42fafdaa0e44c5d", torch_dtype=dtype, use_safetensors=False)
     pipeline = DiffusionPipeline.from_pretrained(ckpt_id, vae=vae, transformer=model, torch_dtype=dtype,)
     torch.backends.cudnn.benchmark = True
     torch.backends.cuda.matmul.allow_tf32 = True
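The change pins the transformer to a specific snapshot inside the local Hugging Face cache instead of loading it by hub id. As a minimal sketch (not part of this commit), the same pinned revision could be resolved without hardcoding $HOME or the cache directory layout by letting huggingface_hub locate it; the repo id "slobers/transgender" is inferred here from the cache folder name models--slobers--transgender and is an assumption:

import torch
from huggingface_hub import snapshot_download
from diffusers import FluxTransformer2DModel

# Resolve the local snapshot directory for the pinned revision.
# If the files are already in ~/.cache/huggingface/hub, this returns
# the cached path without downloading anything again.
snapshot_path = snapshot_download(
    repo_id="slobers/transgender",                        # assumed from the cache dir name
    revision="cb99836efa0ed55856970269c42fafdaa0e44c5d",   # commit hash taken from the diff
)

model = FluxTransformer2DModel.from_pretrained(
    snapshot_path,
    torch_dtype=torch.bfloat16,
    use_safetensors=False,
)

This keeps the revision pinned while staying independent of the user's home directory and of any future changes to the hub cache layout.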