pengdaqian committed
Commit 99954a1 · Parent: dbea546
Files changed (1)
app.py +6 -1
app.py CHANGED
@@ -83,11 +83,16 @@ model = SynthesizerInfer(
 load_svc_model("vits_pretrain/sovits5.0-48k-debug.pth", model)
 model.eval()
 model.to(device)
+model.enc_p = torch.quantization.quantize_dynamic(model.enc_p, {torch.nn.Linear}, dtype=torch.qint8)
+
 whisper_model = whisper.inference.load_model(os.path.join("whisper_pretrain", "medium.pt"))
 whisper_quant_model = torch.quantization.quantize_dynamic(
     whisper_model, {torch.nn.Linear}, dtype=torch.qint8
 )
 splitter_model = Splitter.from_pretrained(os.path.join("torchspleeter/models/2stems", "spleeter.pth")).to(device).eval()
+splitter_quant_model = torch.quantization.quantize_dynamic(
+    splitter_model, {torch.nn.Linear}, dtype=torch.qint8
+)
 
 
 # warm up
@@ -203,7 +208,7 @@ def svc_main(sid, input_audio):
     if not os.path.exists(tmpfile_path):
         os.makedirs(tmpfile_path)
 
-    sound_split(splitter_model, input_audio_tmp_file, tmpfile_path)
+    sound_split(splitter_quant_model, input_audio_tmp_file, tmpfile_path)
 
     curr_tmp_path = tmpfile_path
     vocals_filepath = os.path.join(curr_tmp_path, 'vocals.wav')
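
The pattern this commit applies throughout is PyTorch dynamic quantization: torch.quantization.quantize_dynamic rewrites every torch.nn.Linear inside a module to an int8-weight version, shrinking memory use and typically speeding up CPU inference. Below is a minimal, self-contained sketch of the same pattern on a toy module; ToyEncoder and its layer sizes are hypothetical stand-ins for model.enc_p, whisper_model, and splitter_model from app.py.

# Minimal sketch of the dynamic-quantization pattern used in this commit.
# ToyEncoder is hypothetical; it stands in for the real modules in app.py.
import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(256, 512)
        self.fc2 = nn.Linear(512, 256)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))

float_model = ToyEncoder().eval()  # quantize after switching to eval mode

# Replace every nn.Linear with a version whose weights are stored as int8
# (qint8); activations stay float32 and are quantized per batch at runtime.
# Dynamically quantized ops execute on CPU.
quant_model = torch.quantization.quantize_dynamic(
    float_model, {torch.nn.Linear}, dtype=torch.qint8
)

with torch.no_grad():
    out = quant_model(torch.randn(1, 256))
print(out.shape)  # torch.Size([1, 256])

Only the layer types named in the set passed to quantize_dynamic (here torch.nn.Linear) are replaced; convolutions and other layers stay in float32, so the speed/quality trade-off falls mostly on the linear-heavy parts of each model.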