vadhri committed (verified)
Commit f9e92c9 · 1 Parent(s): 6ec365e

Update app.py

Files changed (1):
  1. app.py +6 -17
app.py CHANGED
@@ -14,11 +14,10 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
 
 # load text-to-speech checkpoint and speaker embeddings
-processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+processor = SpeechT5Processor.from_pretrained("vadhri/speecht5_finetuned_voxpopuli_nl")
 
 # model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
-model = VitsModel.from_pretrained("facebook/mms-tts-spa").to(device)
-tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-spa")
+model = VitsModel.from_pretrained("vadhri/speecht5_finetuned_voxpopuli_nl").to(device)
 
 vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
 
@@ -27,30 +26,20 @@ speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze
 
 
 def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "es"})
-    print ('Translated text : ', outputs["text"])
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "nl"})
     return outputs["text"]
 
 
 def synthesise(text):
-    inputs = tokenizer(text, return_tensors="pt")
-
-    with torch.no_grad():
-        speech = model(**inputs)
-
-    print (speech)
-
-    return speech.waveform
+    inputs = processor(text=text, return_tensors="pt")
+    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
+    return speech.cpu()
 
-def speech_to_speech_translation_fix(audio,voice_preset="v2/zh_speaker_1"):
-    synthesised_rate,synthesised_speech = speech_to_speech_translation(audio,voice_preset)
-    return synthesised_rate,synthesised_speech.T
 
 def speech_to_speech_translation(audio):
     translated_text = translate(audio)
     synthesised_speech = synthesise(translated_text)
     synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
-    synthesised_speech = speech_to_speech_translation_fix(synthesised_speech)
     return 16000, synthesised_speech
 
 
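Below is a minimal sketch of how the updated pipeline might be exercised end to end, not part of the commit. It assumes "vadhri/speecht5_finetuned_voxpopuli_nl" is a SpeechT5 fine-tune and therefore loads it with SpeechT5ForTextToSpeech; note the committed code keeps the VitsModel class while calling generate_speech, which is a method of SpeechT5ForTextToSpeech, not VitsModel. The input file sample.wav and the scipy save step are illustrative assumptions.

# Minimal sketch (not part of the commit): run the updated pipeline end to end.
# Assumes "vadhri/speecht5_finetuned_voxpopuli_nl" is a SpeechT5 fine-tune, so it
# is loaded with SpeechT5ForTextToSpeech rather than VitsModel.
import numpy as np
import torch
from datasets import load_dataset
from scipy.io import wavfile
from transformers import (SpeechT5ForTextToSpeech, SpeechT5HifiGan,
                          SpeechT5Processor, pipeline)

device = "cuda:0" if torch.cuda.is_available() else "cpu"

asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
processor = SpeechT5Processor.from_pretrained("vadhri/speecht5_finetuned_voxpopuli_nl")
model = SpeechT5ForTextToSpeech.from_pretrained("vadhri/speecht5_finetuned_voxpopuli_nl").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

# Speaker x-vector matching the hunk context (index 7306 of cmu-arctic-xvectors).
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

# Whisper produces Dutch text from the input audio; SpeechT5 re-synthesises it.
text = asr_pipe("sample.wav", max_new_tokens=256,
                generate_kwargs={"task": "transcribe", "language": "nl"})["text"]
inputs = processor(text=text, return_tensors="pt")
speech = model.generate_speech(inputs["input_ids"].to(device),
                               speaker_embeddings.to(device), vocoder=vocoder)

# The HiFi-GAN vocoder outputs 16 kHz float audio; scale to int16 PCM as the
# committed code does before returning it to Gradio.
wavfile.write("translated.wav", 16000, (speech.cpu().numpy() * 32767).astype(np.int16))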