Fabrice-TIERCELIN committed on
Commit 985a4fc · verified · 1 Parent(s): fea04d7

snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)

Files changed (1)
  1. app.py +13 -13
app.py CHANGED
@@ -16,19 +16,19 @@ from hyvideo.constants import NEGATIVE_PROMPT
 
 from huggingface_hub import snapshot_download
 
-#if torch.cuda.device_count() > 0:
-#    snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
-#    snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)#
-#
-#    class Args:
-#        def __init__(self, input_dir, output_dir):
-#            self.input_dir = input_dir
-#            self.output_dir = output_dir
-#
-#    # Create the object
-#    args = Args("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
-#    preprocess_text_encoder_tokenizer(args)
-#    snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)
+if torch.cuda.device_count() > 0:
+    snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
+    snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)#
+
+    class Args:
+        def __init__(self, input_dir, output_dir):
+            self.input_dir = input_dir
+            self.output_dir = output_dir
+
+    # Create the object
+    args = Args("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
+    preprocess_text_encoder_tokenizer(args)
+    snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)
 
 def initialize_model(model_path):
     print("initialize_model: " + model_path)