jason1i committed on
Commit 92d4026 · 1 Parent(s): 9d953d4

Update app.py

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -14,7 +14,8 @@ asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base",
 # load text-to-speech checkpoint and speaker embeddings
 #processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
 #Use own TTS Model
-processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
+processor = SpeechT5Processor.from_pretrained("jason1i/speecht5_finetuned_voxpopuli_nl")
+#processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
 #processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
 #processor = SpeechT5Processor.from_pretrained("Salama1429/TTS_German_Speecht5_finetuned_voxpopuli_nl")
 # Load model directly
@@ -23,7 +24,8 @@ processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpop
 
 #model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
 #Use own TTS Model
-model = SpeechT5ForTextToSpeech.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi",ignore_mismatched_sizes=True,)
+model = SpeechT5ForTextToSpeech.from_pretrained("jason1i/speecht5_finetuned_voxpopuli_nl",ignore_mismatched_sizes=True,)
+#model = SpeechT5ForTextToSpeech.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi",ignore_mismatched_sizes=True,)
 #model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
 
 vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
@@ -41,7 +43,7 @@ speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze
 # At Inference. it should use translate(sample["audio"].copy())
 
 def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "fi"})
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "nl"})
     return outputs["text"]
 
 
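For context, below is a minimal sketch of how the pieces touched by this commit fit together: Whisper now transcribes Dutch ("nl") instead of Finnish ("fi"), and the Dutch fine-tuned SpeechT5 checkpoint jason1i/speecht5_finetuned_voxpopuli_nl drives synthesis. Only the checkpoint names, the translate() body, and the speaker-embedding index come from the diff above; the embeddings dataset name (Matthijs/cmu-arctic-xvectors) and the synthesise() helper are assumptions added for illustration and are not part of the committed file.

```python
# Sketch of the speech-to-speech path after this commit (not the full app.py).
import torch
from datasets import load_dataset
from transformers import (
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    SpeechT5Processor,
    pipeline,
)

device = "cuda" if torch.cuda.is_available() else "cpu"

# Speech recognition: Whisper, forced to transcribe Dutch (as in the diff).
asr_pipe = pipeline(
    "automatic-speech-recognition", model="openai/whisper-base", device=device
)

# Text-to-speech: the fine-tuned Dutch SpeechT5 checkpoint from the diff.
processor = SpeechT5Processor.from_pretrained("jason1i/speecht5_finetuned_voxpopuli_nl")
model = SpeechT5ForTextToSpeech.from_pretrained(
    "jason1i/speecht5_finetuned_voxpopuli_nl", ignore_mismatched_sizes=True
).to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

# Speaker embedding: index 7306 matches the hunk header; the dataset name is an
# assumption (the CMU ARCTIC x-vectors commonly used with SpeechT5).
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = (
    torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to(device)
)


def translate(audio):
    # Transcribe the input audio in Dutch, as in the updated app.py.
    outputs = asr_pipe(
        audio,
        max_new_tokens=256,
        generate_kwargs={"task": "transcribe", "language": "nl"},
    )
    return outputs["text"]


def synthesise(text):
    # Hypothetical helper: turn the transcript back into speech with SpeechT5.
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(
        inputs["input_ids"].to(device), speaker_embeddings, vocoder=vocoder
    )
    return speech.cpu()  # 16 kHz waveform as a 1-D tensor
```

One note on the loading flags: ignore_mismatched_sizes=True tells from_pretrained to skip any checkpoint weights whose shapes do not match the model config (leaving those layers at their initialized values) instead of raising, which is presumably why it is kept on both the old Finnish and the new Dutch fine-tuned checkpoints.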