Get backend - 9

src/pipeline.py  (CHANGED, +4 -3)
@@ -55,10 +55,11 @@ def load_pipeline() -> Pipeline:
     ).to("cuda")
 
     pipeline.transformer.to(memory_format=torch.channels_last)
-    pipeline.
+    pipeline.vae.to(memory_format=torch.channels_last)
+    pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune")
     # quantize_(pipeline.vae, int8_weight_only())
-
-    pipeline.set_progress_bar_config(disable=True)
+    pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune")
+    # pipeline.set_progress_bar_config(disable=True)
 
     PROMPT = 'semiconformity, peregrination, quip, twineless, emotionless, tawa, depickle'
     with torch.no_grad():
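For reference, the pattern this commit applies — channels_last memory format on the heavy submodules plus torch.compile with mode="max-autotune" — looks like this as a standalone script. This is a minimal sketch, not the repo's load_pipeline(): the model id, dtype, and prompt call are illustrative assumptions; only the transformer/vae attribute names and the optimization calls come from the diff.

    import torch
    from diffusers import DiffusionPipeline

    # Assumed checkpoint for illustration; the actual model loaded by
    # load_pipeline() is not visible in this diff.
    pipe = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell",
        torch_dtype=torch.bfloat16,
    ).to("cuda")

    # channels_last stores 4D tensors NHWC; it mainly benefits convolutional
    # modules like the VAE, and is a cheap no-op where it doesn't apply.
    pipe.transformer.to(memory_format=torch.channels_last)
    pipe.vae.to(memory_format=torch.channels_last)

    # mode="max-autotune" spends extra compile time searching for faster kernels:
    # the first call is slow (warm-up), later calls reuse the compiled graphs.
    pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune")
    pipe.vae = torch.compile(pipe.vae, mode="max-autotune")

    with torch.no_grad():
        image = pipe("a test prompt", num_inference_steps=4).images[0]

Leaving the progress bar enabled (the commented-out set_progress_bar_config(disable=True)) costs little; the compile warm-up on the first inference call is the dominant one-time cost of this change.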