jason1i committed on
Commit 7db10e0
Parent(s): 9ec6a3a

Update app.py

Files changed (1)
  1. app.py  +5 -5
app.py CHANGED
@@ -14,16 +14,16 @@ asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base",
 # load text-to-speech checkpoint and speaker embeddings
 #processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
 #Use own TTS Model
-#processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
+processor = SpeechT5Processor.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
 #processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
-processor = SpeechT5Processor.from_pretrained("Salama1429/TTS_German_Speecht5_finetuned_voxpopuli_nl")
+#processor = SpeechT5Processor.from_pretrained("Salama1429/TTS_German_Speecht5_finetuned_voxpopuli_nl")

 model = SpeechT5ForTextToSpeech.from_pretrained("Salama1429/TTS_German_Speecht5_finetuned_voxpopuli_nl")

 #model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
 #Use own TTS Model
-#model = SpeechT5ForTextToSpeech.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi")
-#model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
+model = SpeechT5ForTextToSpeech.from_pretrained("jasonl1/speecht5_finetuned_voxpopuli_fi",ignore_mismatched_sizes=True,)
+model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")

 vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

@@ -40,7 +40,7 @@ speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze
 # At Inference. it should use translate(sample["audio"].copy())

 def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "de"})
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "fi"})
     return outputs["text"]

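
For reference, a minimal sketch of how the checkpoints touched by this commit are typically wired together at inference, assuming the rest of app.py follows the usual SpeechT5 speech-to-speech translation template; the synthesise and speech_to_speech_translation helpers below are illustrative names, not part of this commit:

# Illustrative glue code only (not part of this commit). It reuses the objects
# defined in app.py: asr_pipe, processor, model, vocoder, speaker_embeddings
# and translate(). It assumes model, vocoder, speaker_embeddings and the
# processor outputs all live on the same device.
import numpy as np

target_dtype = np.int16
max_range = np.iinfo(target_dtype).max

def synthesise(text):
    # Tokenise the transcription and synthesise a waveform with the
    # fine-tuned SpeechT5 model plus the HiFi-GAN vocoder.
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(
        inputs["input_ids"],
        speaker_embeddings,
        vocoder=vocoder,
    )
    return speech.cpu()

def speech_to_speech_translation(audio):
    # translate() comes from the diff above and now transcribes with
    # language="fi"; its text output is fed straight into the TTS model.
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    # Convert the float waveform in [-1, 1] to 16-bit PCM at 16 kHz.
    synthesised_speech = (synthesised_speech.numpy() * max_range).astype(np.int16)
    return 16000, synthesised_speech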