Create app.py
app.py
ADDED
@@ -0,0 +1,116 @@
import os
import sys
import gradio as gr
import subprocess
import json

# Set environment variables for HF Spaces
os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
os.environ["GRADIO_SERVER_PORT"] = "7860"

# Pre-download models cache
os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"
os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/hf_cache"

# Fix potential Hunyuan Video Avatar issues
os.environ["TOKENIZERS_PARALLELISM"] = "false"

def setup_environment():
    """Setup environment for HF Spaces with WanGP v6.3"""
    # Install additional dependencies if needed
    try:
        import sageattention
    except ImportError:
        print("Installing SageAttention...")
        subprocess.run([sys.executable, "-m", "pip", "install", "sageattention==1.0.6"], check=True)

    # Ensure face detection libs are available
    try:
        import insightface
        import facexlib
    except ImportError:
        print("Installing face processing libraries...")
        subprocess.run([sys.executable, "-m", "pip", "install", "insightface", "facexlib"], check=True)

def download_essential_models():
    """Pre-download essential models for faster startup"""
    from huggingface_hub import snapshot_download

    print("Downloading Hunyuan Video Avatar models...")
    try:
        # Download Hunyuan Video Avatar base models
        snapshot_download(
            repo_id="tencent/HunyuanVideo-Avatar",
            cache_dir="/tmp/hf_cache",
            allow_patterns=["*.safetensors", "*.json", "*.txt", "*.bin"],
            ignore_patterns=["*.mp4", "*.avi", "*.mov"]  # Skip demo videos
        )

        # Download base Hunyuan Video model
        snapshot_download(
            repo_id="tencent/HunyuanVideo",
            cache_dir="/tmp/hf_cache",
            allow_patterns=["*.safetensors", "*.json", "*.txt"],
            ignore_patterns=["*.mp4", "*.avi"]
        )

    except Exception as e:
        print(f"Model download warning: {e}")
        print("Models will be downloaded on-demand during first use.")

def create_hf_config():
    """Create optimized config for HF Spaces deployment"""
    config = {
        "model_settings": {
            "profile": 3,  # Optimized for A10G Large
            "quantize_transformer": True,
            "attention_mode": "sage",
            "compile": False,  # Disable for stability on HF
            "teacache": "2.0"
        },
        "avatar_settings": {
            "max_frames": 120,  # ~5 seconds at 24fps
            "resolution": "512x512",  # Balanced quality/performance
            "emotion_control": True,
            "multi_character": True
        },
        "memory_optimization": {
            "enable_vae_tiling": True,
            "enable_cpu_offload": True,
            "max_batch_size": 1,
            "gradient_checkpointing": True
        },
        "audio_processing": {
            "sample_rate": 16000,
            "max_duration": 15,  # seconds
            "supported_formats": ["wav", "mp3", "m4a"]
        }
    }

    with open("/tmp/hf_config.json", "w") as f:
        json.dump(config, f, indent=2)

    return config

if __name__ == "__main__":
    print("🚀 Starting WanGP v6.3 with Hunyuan Video Avatar...")

    setup_environment()
    config = create_hf_config()
    download_essential_models()

    print("✅ Setup complete! Launching application...")

    # Import and run the main application
    from wgp import main
    main(
        profile=3,  # Higher profile for A10G Large
        attention="sage",  # Use Sage attention for better performance
        server_name="0.0.0.0",
        server_port=7860,
        quantize_transformer=True,
        teacache="2.0",  # Enable TeaCache for Avatar acceleration
        compile=False,  # Disabled for HF Spaces stability
        share=False,  # HF Spaces handles sharing
        config_file="/tmp/hf_config.json"
    )
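For reference, the config file written by create_hf_config() can be sanity-checked with the standard library alone. A minimal sketch, assuming app.py has already run so /tmp/hf_config.json exists; the path, keys, and values are taken from the dictionary in the commit above, and the check itself is illustrative rather than part of this change:

import json

# Load the config written by create_hf_config() and spot-check a few settings
with open("/tmp/hf_config.json") as f:
    cfg = json.load(f)

# These values mirror the "model_settings" and "avatar_settings" blocks above
assert cfg["model_settings"]["profile"] == 3
assert cfg["avatar_settings"]["max_frames"] == 120
print("Config sections:", ", ".join(cfg.keys()))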