Upload src/pipeline.py with huggingface_hub
src/pipeline.py  CHANGED  (+5 -5)
@@ -28,20 +28,20 @@ def load_pipeline() -> Pipeline:
         path,
         use_safetensors=False,
         local_files_only=True,
-        torch_dtype=torch.bfloat16)
+        torch_dtype=torch.bfloat16).to(memory_format=torch.channels_last)
+
     pipeline = FluxPipeline.from_pretrained(
         CHECKPOINT,
         revision=REVISION,
         transformer=transformer,
         local_files_only=True,
         torch_dtype=torch.bfloat16,
-        )
+    )
 
-
+    pipeline.to(memory_format=torch.channels_last)
     quantize_(pipeline.vae, int8_weight_only())
-    # pipeline.vae.compile()
     pipeline.to("cuda")
-    for _ in range(
+    for _ in range(4):
         pipeline("cat", num_inference_steps=4)
 
     return pipeline
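For readers reconstructing the file from this hunk alone, the following is a minimal sketch of what load_pipeline() would look like after this commit. Only the function body mirrors the lines visible in the diff; the imports, the CHECKPOINT/REVISION/path values, the transformer class (FluxTransformer2DModel), and the Pipeline alias are assumptions filled in from context, not part of the commit.

# Minimal sketch of load_pipeline() after this commit. Everything outside the
# function body (imports, CHECKPOINT, REVISION, path, the transformer class,
# and the Pipeline alias) is an assumption, not taken from the diff.
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel
from torchao.quantization import int8_weight_only, quantize_

Pipeline = FluxPipeline  # assumed alias for the "-> Pipeline" annotation
CHECKPOINT = "black-forest-labs/FLUX.1-schnell"  # assumed model id
REVISION = "main"                                # assumed revision
path = "./transformer"                           # assumed local transformer path


def load_pipeline() -> Pipeline:
    # Load the transformer in bfloat16 and switch it to channels_last memory
    # format (the first "+" line of the hunk).
    transformer = FluxTransformer2DModel.from_pretrained(
        path,
        use_safetensors=False,
        local_files_only=True,
        torch_dtype=torch.bfloat16).to(memory_format=torch.channels_last)

    pipeline = FluxPipeline.from_pretrained(
        CHECKPOINT,
        revision=REVISION,
        transformer=transformer,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )

    # Put the whole pipeline in channels_last, quantize the VAE weights to
    # int8 with torchao, and move everything to the GPU.
    pipeline.to(memory_format=torch.channels_last)
    quantize_(pipeline.vae, int8_weight_only())
    pipeline.to("cuda")

    # Warm-up passes so kernel compilation and caching costs are paid once,
    # before the pipeline is served.
    for _ in range(4):
        pipeline("cat", num_inference_steps=4)

    return pipeline

The commit itself is small: it moves the transformer and the pipeline to channels_last memory format, replaces the commented-out VAE compile with nothing, and completes the previously truncated warm-up loop so that four 4-step generations run at load time.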