thianfoo committed
Commit 2add214 · verified · 1 Parent(s): a2d1261

Update app.py

Files changed (1)
  1. app.py +86 -1
app.py CHANGED
@@ -1,5 +1,90 @@
 import gradio as gr
 import numpy as np
+import torch
+from datasets import load_dataset
+from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+title = "GenAI Audio Demo"
+description = """
+Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. The demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Microsoft's
+[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech:
+![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
+"""
+
+# Load the Whisper speech-translation pipeline
+asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
+
+# Load the SpeechT5 text-to-speech processor from the pretrained checkpoint
+processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+
+model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
+vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
+
+embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+# Translate source-language speech to English text with Whisper
+def translate(audio):
+    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
+    return outputs["text"]
+
+# Synthesise English speech from text with SpeechT5 and the HiFi-GAN vocoder
+def synthesise(text):
+    inputs = processor(text=text, return_tensors="pt")
+    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
+    return speech.cpu()
+
+# Full cascade: source speech -> English text -> English speech
+def speech_to_speech_translation(audio):
+    translated_text = translate(audio)
+    synthesised_speech = synthesise(translated_text)
+    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
+    return 16000, synthesised_speech
+
+# Text-to-speech only, skipping the translation step
+def text_to_speech(text):
+    synthesised_speech = synthesise(text)
+    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
+    return 16000, synthesised_speech
+
+demo = gr.Blocks()
+
+# Speech-to-speech translation with the microphone as input
+mic_translate = gr.Interface(
+    fn=speech_to_speech_translation,
+    inputs=gr.Audio(source="microphone", type="filepath"),
+    outputs=gr.Audio(label="Generated Speech", type="numpy"),
+    title=title,
+    description=description,
+)
+
+# Speech-to-speech translation with uploaded audio files as input
+file_translate = gr.Interface(
+    fn=speech_to_speech_translation,
+    inputs=gr.Audio(source="upload", type="filepath"),
+    outputs=gr.Audio(label="Generated Speech", type="numpy"),
+    examples=[["./english.wav"], ["./chinese.wav"]],
+    title=title,
+    description=description,
+)
+
+# Text-to-speech with a textbox as input
+text_translate = gr.Interface(
+    fn=text_to_speech,
+    inputs="textbox",
+    outputs=gr.Audio(label="Generated Speech", type="numpy"),
+    title=title,
+    description=description
+)
+
+# Present the three interfaces as tabs of the demo
+with demo:
+    gr.TabbedInterface([mic_translate, file_translate, text_translate], ["Microphone", "Audio File", "Text to Speech"])
+
+demo.launch()
+'''import gradio as gr
+import numpy as np
 import random
 from diffusers import DiffusionPipeline
 import torch
@@ -143,4 +228,4 @@ with gr.Blocks(css=css) as demo:
         outputs = [result]
     )
 
-demo.queue().launch()
+demo.queue().launch()'''
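
One portability note: the `source=` keyword passed to `gr.Audio` above is Gradio 3.x API; Gradio 4.0 renamed the argument to `sources` and made it a list. A minimal sketch of the equivalent input components, assuming Gradio >= 4.0 (the variable names are illustrative, everything else in the interfaces carries over unchanged):

# Sketch: the same audio inputs under Gradio >= 4.0, where the 3.x
# `source="..."` argument became `sources=[...]` (a list of allowed sources).
import gradio as gr

mic_input = gr.Audio(sources=["microphone"], type="filepath")   # was source="microphone"
file_input = gr.Audio(sources=["upload"], type="filepath")      # was source="upload"

`type="filepath"` still hands the handler a path on disk, which is what `asr_pipe` expects, so the translation functions need no changes.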