Spaces:
Running
Running
Update ootd/inference_ootd.py
Browse files — ootd/inference_ootd.py (+5 −5)
ootd/inference_ootd.py
CHANGED
@@ -26,9 +26,9 @@ from transformers import AutoProcessor, CLIPVisionModelWithProjection
|
|
26 |
from transformers import CLIPTextModel, CLIPTokenizer
|
27 |
|
28 |
VIT_PATH = "openai/clip-vit-large-patch14"
|
29 |
-
VAE_PATH = "levihsu/OOTDiffusion"
|
30 |
-
UNET_PATH = "levihsu/OOTDiffusion"
|
31 |
-
MODEL_PATH = "levihsu/OOTDiffusion"
|
32 |
|
33 |
class OOTDiffusion:
|
34 |
|
@@ -43,13 +43,13 @@ class OOTDiffusion:
|
|
43 |
|
44 |
unet_garm = UNetGarm2DConditionModel.from_pretrained(
|
45 |
UNET_PATH,
|
46 |
-
subfolder="unet_garm",
|
47 |
torch_dtype=torch.float16,
|
48 |
use_safetensors=True,
|
49 |
)
|
50 |
unet_vton = UNetVton2DConditionModel.from_pretrained(
|
51 |
UNET_PATH,
|
52 |
-
subfolder="unet_vton",
|
53 |
torch_dtype=torch.float16,
|
54 |
use_safetensors=True,
|
55 |
)
|
|
|
26 |
from transformers import CLIPTextModel, CLIPTokenizer
|
27 |
|
28 |
VIT_PATH = "openai/clip-vit-large-patch14"
|
29 |
+
VAE_PATH = "levihsu/ootd"
|
30 |
+
UNET_PATH = "levihsu/ootd"
|
31 |
+
MODEL_PATH = "levihsu/ootd"
|
32 |
|
33 |
class OOTDiffusion:
|
34 |
|
|
|
43 |
|
44 |
unet_garm = UNetGarm2DConditionModel.from_pretrained(
|
45 |
UNET_PATH,
|
46 |
+
subfolder="ootd_hd/checkpoint-36000/unet_garm",
|
47 |
torch_dtype=torch.float16,
|
48 |
use_safetensors=True,
|
49 |
)
|
50 |
unet_vton = UNetVton2DConditionModel.from_pretrained(
|
51 |
UNET_PATH,
|
52 |
+
subfolder="ootd_hd/checkpoint-36000/unet_vton",
|
53 |
torch_dtype=torch.float16,
|
54 |
use_safetensors=True,
|
55 |
)
|