multimodalart (HF Staff) committed
Commit d50658d · verified · 1 Parent(s): cfd78bb

Update app.py

Files changed (1):
  1. app.py +3 -13
app.py CHANGED
@@ -39,7 +39,7 @@ from pipeline import CausalInferencePipeline
 from demo_utils.constant import ZERO_VAE_CACHE
 from demo_utils.vae_block3 import VAEDecoderWrapper
 from utils.wan_wrapper import WanDiffusionWrapper, WanTextEncoder
-from demo_utils.memory import gpu, get_cuda_free_memory_gb, DynamicSwapInstaller
+# from demo_utils.memory import gpu, get_cuda_free_memory_gb, DynamicSwapInstaller
 
 # --- Argument Parsing ---
 parser = argparse.ArgumentParser(description="Gradio Demo for Self-Forcing with FastRTC")
@@ -51,12 +51,8 @@ parser.add_argument('--share', action='store_true', help="Create a public Gradio
 parser.add_argument('--trt', action='store_true', help="Use TensorRT optimized VAE decoder.")
 args = parser.parse_args()
 
-# --- Global Setup & Model Loading ---
-print(f"CUDA device: {gpu}")
-print(f'Initial Free VRAM: {get_cuda_free_memory_gb(gpu):.2f} GB')
-LOW_MEMORY = get_cuda_free_memory_gb(gpu) < 40
+gpu = "cuda"
 
-# Load configs
 try:
     config = OmegaConf.load(args.config_path)
     default_config = OmegaConf.load("configs/default_config.yaml")
@@ -77,18 +73,12 @@ except FileNotFoundError as e:
     print(f"Error loading checkpoint: {e}\nPlease ensure the checkpoint '{args.checkpoint_path}' exists.")
     exit(1)
 
-# Prepare models for inference
 text_encoder.eval().to(dtype=torch.bfloat16).requires_grad_(False)
 transformer.eval().to(dtype=torch.float16).requires_grad_(False)
 
-if LOW_MEMORY:
-    print("Low memory mode enabled. Using dynamic model swapping.")
-    DynamicSwapInstaller.install_model(text_encoder, device=gpu)
-else:
-    text_encoder.to(gpu)
+text_encoder.to(gpu)
 transformer.to(gpu)
 
-# --- VAE Decoder Management ---
 APP_STATE = {
     "torch_compile_applied": False,
     "fp8_applied": False,