Use torch.float16 to fit within 24 GB VRAM
src/pipeline.py (+2 -0)
@@ -1,3 +1,4 @@
+import torch
 from PIL.Image import Image
 from diffusers import StableDiffusionXLPipeline
 from pipelines.models import TextToImageRequest
@@ -9,6 +10,7 @@ def load_pipeline() -> StableDiffusionXLPipeline:
         "stablediffusionapi/newdream-sdxl-20",
         revision="4bdd502bca7abd1ea57ee12fba0b0f23052958cc",
         cache_dir="./models",
+        torch_dtype=torch.float16,
         local_files_only=True,
     ).to("cuda")
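For reference, the loader in src/pipeline.py after this change would look roughly like the sketch below. Only the imports, the function signature, and the from_pretrained() arguments come from the diff; the variable name `pipeline`, the placement of the call, and the `return` statement are assumptions about the unchanged lines between the two hunks.

    import torch
    from PIL.Image import Image
    from diffusers import StableDiffusionXLPipeline
    from pipelines.models import TextToImageRequest

    # Image and TextToImageRequest are presumably used by other functions
    # in this file that the diff does not touch.

    def load_pipeline() -> StableDiffusionXLPipeline:
        # Loading the weights in float16 halves their memory footprint
        # relative to float32, which is what lets this SDXL checkpoint
        # fit within 24 GB of VRAM.
        pipeline = StableDiffusionXLPipeline.from_pretrained(  # assumed local name
            "stablediffusionapi/newdream-sdxl-20",
            revision="4bdd502bca7abd1ea57ee12fba0b0f23052958cc",
            cache_dir="./models",
            torch_dtype=torch.float16,
            local_files_only=True,
        ).to("cuda")
        return pipeline  # assumed; the diff does not show the function's tail

Note that torch_dtype=torch.float16 only changes the precision in which the weights are loaded; since half precision is half the size of single precision, the model's weight footprint roughly halves.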