MohamedRashad commited on
Commit
4d19c03
·
1 Parent(s): bdb501d

Lazy-load the model in infer_EGTTS to improve performance

Browse files
Files changed (1) hide show
  1. app.py +11 -7
app.py CHANGED
@@ -31,16 +31,20 @@ vocab_path = str(vocab_path)
31
  model_path = str(model_path.parent)
32
  speaker_audio_path = str(speaker_audio_path)
33
 
34
- print("Loading model...")
35
- device = "cuda" if torch.cuda.is_available() else "cpu"
36
- config = XttsConfig()
37
- config.load_json(config_path)
38
- model = Xtts.init_from_config(config)
39
- model.load_checkpoint(config, checkpoint_dir=model_path, use_deepspeed=True, vocab_path=vocab_path)
40
- model.to(device)
41
 
42
  @spaces.GPU
43
  def infer_EGTTS(text: str, speaker_audio_path: str, temperature: float = 0.75):
 
 
 
 
 
 
 
 
 
 
44
  print("Computing speaker latents...")
45
  gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=[speaker_audio_path])
46
 
 
31
  model_path = str(model_path.parent)
32
  speaker_audio_path = str(speaker_audio_path)
33
 
34
+ model = None
 
 
 
 
 
 
35
 
36
  @spaces.GPU
37
  def infer_EGTTS(text: str, speaker_audio_path: str, temperature: float = 0.75):
38
+ global model
39
+ if model is None:
40
+ print("Loading model...")
41
+ device = "cuda" if torch.cuda.is_available() else "cpu"
42
+ config = XttsConfig()
43
+ config.load_json(config_path)
44
+ model = Xtts.init_from_config(config)
45
+ model.load_checkpoint(config, checkpoint_dir=model_path, use_deepspeed=True, vocab_path=vocab_path)
46
+ model.to(device)
47
+
48
  print("Computing speaker latents...")
49
  gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=[speaker_audio_path])
50