wasmdashai committed on
Commit
5b3eda3
·
verified ·
1 Parent(s): ef864f5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -108,7 +108,7 @@ def get_model(name_model):
108
  global models
109
  if name_model in models:
110
  return models[name_model]
111
- models[name_model]=VitsModel.from_pretrained(name_model,token=token).cuda()
112
  models[name_model].decoder.apply_weight_norm()
113
  # torch.nn.utils.weight_norm(self.decoder.conv_pre)
114
  # torch.nn.utils.weight_norm(self.decoder.conv_post)
@@ -130,7 +130,7 @@ def modelspeech(text=TXT,name_model="wasmdashai/vits-ar-sa-huba-v2",speaking_r
130
  model=get_model(name_model)
131
  model.speaking_rate=speaking_rate
132
  with torch.no_grad():
133
- wav=list(_inference_forward_stream(model,input_ids=inputs.input_ids.cuda(),attention_mask=inputs.attention_mask.cuda(),speaker_embeddings= None,is_streaming=False))[0]
134
  # with torch.no_grad():
135
  # wav = model(input_ids=inputs["input_ids"].cuda()).waveform.cpu().numpy().reshape(-1)#.detach()
136
 
 
108
  global models
109
  if name_model in models:
110
  return models[name_model]
111
+ models[name_model]=VitsModel.from_pretrained(name_model,token=token)
112
  models[name_model].decoder.apply_weight_norm()
113
  # torch.nn.utils.weight_norm(self.decoder.conv_pre)
114
  # torch.nn.utils.weight_norm(self.decoder.conv_post)
 
130
  model=get_model(name_model)
131
  model.speaking_rate=speaking_rate
132
  with torch.no_grad():
133
+ wav=list(_inference_forward_stream(model,input_ids=inputs.input_ids,attention_mask=inputs.attention_mask,speaker_embeddings= None,is_streaming=False))[0]
134
  # with torch.no_grad():
135
  # wav = model(input_ids=inputs["input_ids"].cuda()).waveform.cpu().numpy().reshape(-1)#.detach()
136