Update app.py
Updated for mms model instead of SpeechT5
app.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 import torch
 from datasets import load_dataset
 
-from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, pipeline
+from transformers import VitsModel, VitsTokenizer, pipeline
 
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
@@ -12,23 +12,31 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
 
 # load text-to-speech checkpoint and speaker embeddings
-processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+#processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
 
-model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
-vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
+model = VitsModel.from_pretrained("Matthijs/mms-tts-fra").to(device)
+tokenizer = VitsTokenizer.from_pretrained("Matthijs/mms-tts-fra").to(device)
 
-embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+#embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+#speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
 
 
 def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "fr"})
     return outputs["text"]
 
 
 def synthesise(text):
-    inputs = processor(text=text, return_tensors="pt")
-    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
+    #inputs = processor(text=text, return_tensors="pt")
+    #speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
+    inputs = tokenizer(text_example, return_tensors="pt")
+    input_ids = inputs["input_ids"]
+
+
+    with torch.no_grad():
+        outputs = model(input_ids)
+
+    speech = outputs.audio[0]
     return speech.cpu()
 
 
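As committed, the new synthesise body still tokenizes an undefined text_example instead of the text argument, VitsTokenizer.from_pretrained(...).to(device) will raise because the tokenizer is not a torch module, and the input ids are never moved to the model's device. A minimal sketch of the intended MMS/VITS synthesis path, keeping the Matthijs/mms-tts-fra checkpoint from this commit, could look like the following; note that released versions of transformers expose the generated audio as outputs.waveform (the outputs.audio field used in the commit is not part of that released API, so treating waveform as the correct attribute is an assumption here).

import torch
from transformers import VitsModel, VitsTokenizer

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# only the model is a torch module; the tokenizer stays on the CPU
model = VitsModel.from_pretrained("Matthijs/mms-tts-fra").to(device)
tokenizer = VitsTokenizer.from_pretrained("Matthijs/mms-tts-fra")


def synthesise(text):
    # tokenize the function argument, not a module-level text_example
    inputs = tokenizer(text, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)

    with torch.no_grad():
        outputs = model(input_ids)

    # released transformers returns the generated audio as outputs.waveform
    speech = outputs.waveform[0]
    return speech.cpu()

With this in place, translate(audio) returns French text and synthesise(text) returns a mono waveform tensor (the MMS TTS checkpoints generate audio at 16 kHz), matching the French-to-French transcribe-and-resynthesize flow this commit switches the demo to.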