Spaces: Runtime error
model = None
app.py CHANGED
@@ -8,27 +8,27 @@ import random
 import spaces
 import torch
 
-from hyvideo.utils.file_utils import save_videos_grid
-from hyvideo.utils.preprocess_text_encoder_tokenizer_utils import preprocess_text_encoder_tokenizer
-from hyvideo.config import parse_args
-from hyvideo.inference import HunyuanVideoSampler
-from hyvideo.constants import NEGATIVE_PROMPT
+#from hyvideo.utils.file_utils import save_videos_grid
+#from hyvideo.utils.preprocess_text_encoder_tokenizer_utils import preprocess_text_encoder_tokenizer
+#from hyvideo.config import parse_args
+#from hyvideo.inference import HunyuanVideoSampler
+#from hyvideo.constants import NEGATIVE_PROMPT
 
-from huggingface_hub import snapshot_download
+#from huggingface_hub import snapshot_download
 
-if torch.cuda.device_count() > 0:
-    snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
-    snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)
-
-    class Args:
-        def __init__(self, input_dir, output_dir):
-            self.input_dir = input_dir
-            self.output_dir = output_dir
-
-    # Create the object
-    args = Args("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
-    preprocess_text_encoder_tokenizer(args)
-    snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)
+#if torch.cuda.device_count() > 0:
+#    snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
+#    snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)#
+#
+#    class Args:
+#        def __init__(self, input_dir, output_dir):
+#            self.input_dir = input_dir
+#            self.output_dir = output_dir
+#
+#    # Create the object
+#    args = Args("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
+#    preprocess_text_encoder_tokenizer(args)
+#    snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)
 
 def initialize_model(model_path):
     print("initialize_model: " + model_path)
@@ -45,7 +45,8 @@ def initialize_model(model_path):
     print("Model initialized: " + model_path)
     return hunyuan_video_sampler
 
-model = initialize_model("ckpts")
+#model = initialize_model("ckpts")
+model = None
 
 def generate_video(
     prompt,
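The block the commit disables is a common Space-startup pattern: fetch weights with huggingface_hub.snapshot_download, guarded by a GPU check so CPU-only hardware never attempts the multi-gigabyte download. A minimal, self-contained sketch of that pattern, using the same repo IDs and target directories as the diff; the fetch_checkpoints wrapper, the skip message, and dropping force_download=True are assumptions of this sketch, not code from the commit:

    import torch
    from huggingface_hub import snapshot_download

    def fetch_checkpoints():
        # HunyuanVideo cannot run on CPU and its weights are tens of
        # gigabytes, so only download when a GPU is actually present.
        if torch.cuda.device_count() == 0:
            print("No GPU detected; skipping checkpoint download.")
            return
        snapshot_download(repo_id="tencent/HunyuanVideo",
                          repo_type="model", local_dir="ckpts")
        snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers",
                          repo_type="model",
                          local_dir="ckpts/llava-llama-3-8b-v1_1-transformers")
        snapshot_download(repo_id="openai/clip-vit-large-patch14",
                          repo_type="model", local_dir="ckpts/text_encoder_2")

Note that force_download=True, as used in the original block, re-fetches all three repositories on every restart; once the download is known to be good, relying on the local cache makes startup faster and less failure-prone.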
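With the eager initialization replaced by model = None, anything that previously read the module-level sampler needs a guard before using it. A minimal sketch of the lazy-initialization pattern this change points toward, assuming initialize_model stays as defined in app.py and the app is a Gradio Space; get_model and the error message are illustrative, not code from the commit:

    import torch
    import gradio as gr

    model = None  # populated on first use instead of at import time

    def get_model():
        # initialize_model() is the function defined in app.py above;
        # this wrapper and the GPU check are assumptions of the sketch.
        global model
        if model is None:
            if torch.cuda.device_count() == 0:
                # Fail per-request with a readable message instead of
                # crashing the whole Space at startup.
                raise gr.Error("GPU hardware is required to run HunyuanVideo.")
            model = initialize_model("ckpts")
        return model

generate_video would then call get_model() at the top instead of reading the global directly, so a CPU-only Space still builds and renders its UI, and the failure surfaces only when someone actually requests a video.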