Attention slicing is deprecated and cannot really be the memory bottleneck anymore
#1
by patrickvonplaten · opened
- pipelines/img2img.py +0 -4
pipelines/img2img.py
CHANGED
@@ -112,10 +112,6 @@ class Pipeline:
         if device.type != "mps":
             self.pipe.unet.to(memory_format=torch.channels_last)

-        # check if computer has less than 64GB of RAM using sys or os
-        if psutil.virtual_memory().total < 64 * 1024**3:
-            self.pipe.enable_attention_slicing()
-
         if args.torch_compile:
             print("Running torch compile")
             self.pipe.unet = torch.compile(
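For context, a minimal sketch (not part of this PR) of why the removed block is redundant: assuming the pipeline runs diffusers on PyTorch 2.0 or newer, attention already dispatches to torch.nn.functional.scaled_dot_product_attention, which is memory-efficient on its own, so gating enable_attention_slicing() on system RAM buys little. The checkpoint id below is a placeholder for illustration only.

import torch
from diffusers import StableDiffusionImg2ImgPipeline

# Placeholder checkpoint, for illustration only.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.to("cuda")

# On PyTorch >= 2.0, diffusers uses SDPA-backed attention by default,
# so no enable_attention_slicing() call is needed here.
# If memory is still tight, CPU offload is the usual alternative:
# pipe.enable_model_cpu_offload()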