jason1i committed
Commit 65dbb51 · 1 Parent(s): 6f4b9a7

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -14,8 +14,8 @@ asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base",
 # load text-to-speech checkpoint and speaker embeddings
 #processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
 #Use own TTS Model
-processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
-#processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
+#processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
+processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
 #processor = SpeechT5Processor.from_pretrained("Salama1429/TTS_German_Speecht5_finetuned_voxpopuli_nl")
 # Load model directly

@@ -24,8 +24,8 @@ processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
 #model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
 #Use own TTS Model

-model = SpeechT5ForTextToSpeech.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi",ignore_mismatched_sizes=True,)
-#model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
+#model = SpeechT5ForTextToSpeech.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi",ignore_mismatched_sizes=True,)
+model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")

 vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

@@ -42,7 +42,7 @@ speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze
 # At Inference. it should use translate(sample["audio"].copy())

 def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "fi"})
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "nl"})
     return outputs["text"]

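For context, here is a minimal sketch of how the pieces touched by this commit typically fit together after the change: Whisper transcribes the incoming audio as Dutch, and the Dutch fine-tuned SpeechT5 checkpoint re-synthesises the text as speech. This is not the repository's full app.py; the synthesise() and speech_to_speech() helpers and the "Matthijs/cmu-arctic-xvectors" dataset name are assumptions for illustration, chosen to match the index 7306 xvector reference visible in the hunk header.

# Hedged sketch (not from the commit): Whisper ASR -> SpeechT5 TTS cascade.
import torch
from datasets import load_dataset
from transformers import (
    pipeline,
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    SpeechT5Processor,
)

device = "cuda" if torch.cuda.is_available() else "cpu"

# ASR front end: Whisper transcribes the input audio.
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)

# TTS back end: the Dutch SpeechT5 checkpoint selected in this commit.
processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

# Speaker embedding; the dataset name is an assumption, index 7306 matches the diff.
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)


def translate(audio):
    # "language": "nl" tells Whisper to transcribe the audio as Dutch,
    # mirroring the change in the last hunk of the diff.
    outputs = asr_pipe(
        audio,
        max_new_tokens=256,
        generate_kwargs={"task": "transcribe", "language": "nl"},
    )
    return outputs["text"]


def synthesise(text):
    # Tokenise the text and generate a waveform with the HiFi-GAN vocoder.
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(
        inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder
    )
    return speech.cpu()


def speech_to_speech(audio):
    # Cascade: transcribe with Whisper, then re-synthesise with SpeechT5.
    return synthesise(translate(audio))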