Update app.py
app.py
CHANGED
@@ -39,63 +39,63 @@ import numpy as np
 from huggingface_hub import snapshot_download, hf_hub_download
 import torch
 
-# FLUX.1-dev
-snapshot_download(
-
-
-
-)
-
-# Florence-2-large
-snapshot_download(
-
-
-
-)
-
-# CLIP ViT Large
-snapshot_download(
-
-
-
-)
-
-# DINO ViT-s16
-snapshot_download(
-
-
-
-)
-
-# mPLUG Visual Question Answering
-snapshot_download(
-
-
-
-)
-
-# XVerse
-snapshot_download(
-
-
-
-)
-
-hf_hub_download(
-
-
-
-)
-
-
-
-os.environ["FLORENCE2_MODEL_PATH"] = "
-os.environ["SAM2_MODEL_PATH"] = "
+# # FLUX.1-dev
+# snapshot_download(
+#     repo_id="black-forest-labs/FLUX.1-dev",
+#     local_dir="/data/checkpoints/FLUX.1-dev",
+#     local_dir_use_symlinks=False
+# )
+
+# # Florence-2-large
+# snapshot_download(
+#     repo_id="microsoft/Florence-2-large",
+#     local_dir="/data/checkpoints/Florence-2-large",
+#     local_dir_use_symlinks=False
+# )
+
+# # CLIP ViT Large
+# snapshot_download(
+#     repo_id="openai/clip-vit-large-patch14",
+#     local_dir="/data/checkpoints/clip-vit-large-patch14",
+#     local_dir_use_symlinks=False
+# )
+
+# # DINO ViT-s16
+# snapshot_download(
+#     repo_id="facebook/dino-vits16",
+#     local_dir="/data/checkpoints/dino-vits16",
+#     local_dir_use_symlinks=False
+# )
+
+# # mPLUG Visual Question Answering
+# snapshot_download(
+#     repo_id="xingjianleng/mplug_visual-question-answering_coco_large_en",
+#     local_dir="/data/checkpoints/mplug_visual-question-answering_coco_large_en",
+#     local_dir_use_symlinks=False
+# )
+
+# # XVerse
+# snapshot_download(
+#     repo_id="ByteDance/XVerse",
+#     local_dir="/data/checkpoints/XVerse",
+#     local_dir_use_symlinks=False
+# )
+
+# hf_hub_download(
+#     repo_id="facebook/sam2.1-hiera-large",
+#     local_dir="/data/checkpoints/",
+#     filename="sam2.1_hiera_large.pt",
+# )
+
+
+
+os.environ["FLORENCE2_MODEL_PATH"] = "/data/checkpoints/Florence-2-large"
+os.environ["SAM2_MODEL_PATH"] = "/data/checkpoints/sam2.1_hiera_large.pt"
 os.environ["FACE_ID_MODEL_PATH"] = "./checkpoints/model_ir_se50.pth"
-os.environ["CLIP_MODEL_PATH"] = "
-os.environ["FLUX_MODEL_PATH"] = "
-os.environ["DPG_VQA_MODEL_PATH"] = "
-os.environ["DINO_MODEL_PATH"] = "
+os.environ["CLIP_MODEL_PATH"] = "/data/checkpoints/clip-vit-large-patch14"
+os.environ["FLUX_MODEL_PATH"] = "/data/checkpoints/FLUX.1-dev"
+os.environ["DPG_VQA_MODEL_PATH"] = "/data/checkpoints/mplug_visual-question-answering_coco_large_en"
+os.environ["DINO_MODEL_PATH"] = "/data/checkpoints/dino-vits16"
 
 dtype = torch.bfloat16
 device = "cuda"
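Note: this hunk disables the start-up downloads and hard-codes every model path under /data/checkpoints, which only works if those snapshots are already on the persistent volume. If the downloads ever need to come back, a guarded variant avoids re-fetching on every restart. The sketch below is illustrative and not part of app.py: ensure_snapshot is a hypothetical helper, and it assumes /data/checkpoints is the Space's persistent storage, reusing two of the repo IDs from the commented-out block.

import os

from huggingface_hub import snapshot_download

def ensure_snapshot(repo_id: str, local_dir: str) -> str:
    # Hypothetical helper: download a repo snapshot only when the target
    # directory is missing, so restarts on persistent storage skip the fetch.
    if not os.path.isdir(local_dir):
        snapshot_download(repo_id=repo_id, local_dir=local_dir)
    return local_dir

# Same repo IDs and /data/checkpoints layout as the hunk above (assumption:
# /data is the persistent storage mount).
os.environ["FLUX_MODEL_PATH"] = ensure_snapshot(
    "black-forest-labs/FLUX.1-dev", "/data/checkpoints/FLUX.1-dev"
)
os.environ["FLORENCE2_MODEL_PATH"] = ensure_snapshot(
    "microsoft/Florence-2-large", "/data/checkpoints/Florence-2-large"
)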
@@ -131,7 +131,7 @@ accordions = []
 idip_checkboxes = []
 accordion_states = []
 
-ckpt_root = "
+ckpt_root = "/data/checkpoints/XVerse"
 model.clear_modulation_adapters()
 model.pipe.unload_lora_weights()
 if not os.path.exists(ckpt_root):
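The body of the `if not os.path.exists(ckpt_root):` branch is outside this hunk, so what happens when /data/checkpoints/XVerse is absent is not shown here. One plausible fallback, assuming the ByteDance/XVerse repo from the commented-out download above, would be to restore the checkpoints on demand; this is a sketch, not the code in app.py.

import os

from huggingface_hub import snapshot_download

ckpt_root = "/data/checkpoints/XVerse"
if not os.path.exists(ckpt_root):
    # Assumption: pull the XVerse checkpoints from the Hub when the
    # persistent volume does not have them yet.
    snapshot_download(repo_id="ByteDance/XVerse", local_dir=ckpt_root)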
@@ -569,9 +569,9 @@ if __name__ == "__main__":
 db_latent_lora_scale_str, sb_latent_lora_scale_str, vae_lora_scale_str,
 indices_state,
 session_state,
-
-
-
+*images,
+*captions,
+*idip_checkboxes,
 ],
 outputs=output
 )
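The last hunk splats three dynamically built component lists (images, captions, idip_checkboxes) into the click event's inputs, so the handler receives one flat positional argument per component. A self-contained Gradio sketch of that pattern follows; MAX_SLOTS and summarize are illustrative stand-ins, not the actual app.py handler.

import gradio as gr

MAX_SLOTS = 3  # illustrative; app.py builds its component lists dynamically

with gr.Blocks() as demo:
    images = [gr.Image(label=f"Image {i + 1}") for i in range(MAX_SLOTS)]
    captions = [gr.Textbox(label=f"Caption {i + 1}") for i in range(MAX_SLOTS)]
    idip_checkboxes = [gr.Checkbox(label=f"ID preserve {i + 1}") for i in range(MAX_SLOTS)]
    output = gr.Textbox(label="Summary")
    run = gr.Button("Run")

    def summarize(*flat):
        # The flat tuple arrives in the same order as the inputs list below,
        # so it can be sliced back into the three per-slot groups.
        imgs = flat[:MAX_SLOTS]
        caps = flat[MAX_SLOTS:2 * MAX_SLOTS]
        checks = flat[2 * MAX_SLOTS:]
        used = sum(img is not None for img in imgs)
        return f"{used} image(s); captions={list(caps)}; idip={list(checks)}"

    # Same pattern as the diff: unpack the per-slot lists into `inputs`.
    run.click(summarize, inputs=[*images, *captions, *idip_checkboxes], outputs=output)

if __name__ == "__main__":
    demo.launch()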