Spaces:
Running
Running
nalin0503
committed on
Commit
·
ebdd902
1
Parent(s):
fc843fe
add some cuda memory optimisations
Browse files- Image-Morpher/main.py +13 -0
Image-Morpher/main.py
CHANGED
@@ -9,6 +9,7 @@ from argparse import ArgumentParser
|
|
9 |
from model import DiffMorpherPipeline
|
10 |
import time
|
11 |
import logging
|
|
|
12 |
|
13 |
logs_folder = "logs"
|
14 |
os.makedirs(logs_folder, exist_ok=True)
|
@@ -94,6 +95,13 @@ parser.add_argument("--use_lcm", action="store_true", help="Enable LCM-LoRA acce
|
|
94 |
args = parser.parse_args()
|
95 |
os.makedirs(args.output_path, exist_ok=True)
|
96 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
# Create the pipeline from the given model path
|
98 |
pipeline = DiffMorpherPipeline.from_pretrained(args.model_path, torch_dtype=torch.float32)
|
99 |
|
@@ -147,6 +155,11 @@ images = pipeline(
|
|
147 |
images[0].save(f"{args.output_path}/output.gif", save_all=True,
|
148 |
append_images=images[1:], duration=args.duration, loop=0)
|
149 |
|
|
|
|
|
|
|
|
|
|
|
150 |
end_time = time.time()
|
151 |
elapsed_time = end_time - start_time
|
152 |
|
|
|
9 |
from model import DiffMorpherPipeline
|
10 |
import time
|
11 |
import logging
|
12 |
+
import gc
|
13 |
|
14 |
logs_folder = "logs"
|
15 |
os.makedirs(logs_folder, exist_ok=True)
|
|
|
95 |
args = parser.parse_args()
|
96 |
os.makedirs(args.output_path, exist_ok=True)
|
97 |
|
98 |
+
# Clear any existing PyTorch GPU allocations
|
99 |
+
torch.cuda.empty_cache()
|
100 |
+
gc.collect()
|
101 |
+
|
102 |
+
# Set environment variable for memory allocation
|
103 |
+
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
|
104 |
+
|
105 |
# Create the pipeline from the given model path
|
106 |
pipeline = DiffMorpherPipeline.from_pretrained(args.model_path, torch_dtype=torch.float32)
|
107 |
|
|
|
155 |
images[0].save(f"{args.output_path}/output.gif", save_all=True,
|
156 |
append_images=images[1:], duration=args.duration, loop=0)
|
157 |
|
158 |
+
# Ensure memory is freed after completion
|
159 |
+
pipeline = None
|
160 |
+
torch.cuda.empty_cache()
|
161 |
+
gc.collect()
|
162 |
+
|
163 |
end_time = time.time()
|
164 |
elapsed_time = end_time - start_time
|
165 |
|