Commit 726d965 (parent: da5250a): Update app.py

app.py CHANGED
@@ -4,40 +4,42 @@ import whisper
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 from gtts import gTTS
 
+# Load models
+model_stt = whisper.load_model("base")
+model_translation = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")
+tokenizer_translation = AutoTokenizer.from_pretrained("alirezamsh/small100")
+
 def speech_to_speech(input_audio, to_lang):
     # Save the uploaded audio file
     input_file = "input_audio" + os.path.splitext(input_audio.name)[1]
     input_audio.save(input_file)
-
+
     # Speech-to-Text (STT)
-    model = whisper.load_model("base")
     audio = whisper.load_audio(input_file)
     audio = whisper.pad_or_trim(audio)
-    mel = whisper.log_mel_spectrogram(audio).to(model.device)
-    _, probs = model.detect_language(mel)
+    mel = whisper.log_mel_spectrogram(audio).to(model_stt.device)
+    _, probs = model_stt.detect_language(mel)
     options = whisper.DecodingOptions()
-    result = whisper.decode(model, mel, options)
+    result = whisper.decode(model_stt, mel, options)
     text = result.text
     lang = max(probs, key=probs.get)
-
+
     # Translate
-
-
-
-
-
-
-    translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
+    tokenizer_translation.src_lang = lang
+    tokenizer_translation.tgt_lang = to_lang
+    encoded_bg = tokenizer_translation(text, return_tensors="pt")
+    generated_tokens = model_translation.generate(**encoded_bg)
+    translated_text = tokenizer_translation.batch_decode(generated_tokens, skip_special_tokens=True)[0]
+
     # Text-to-Speech (TTS)
     tts = gTTS(text=translated_text, lang=to_lang)
     output_file = "output_audio.mp3"
     tts.save(output_file)
-
+
     return output_file
 
 languages = ["ru", "fr", "es", "de"]  # Example languages: Russian, French, Spanish, German
-file_input = gr.inputs.File(label="Upload Audio",
+file_input = gr.inputs.File(label="Upload Audio", accept="audio/*")
 dropdown = gr.inputs.Dropdown(languages, label="Translation Language")
 audio_output = gr.outputs.Audio(type="file", label="Translated Voice")
 
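The hunk ends before the Gradio interface itself is built. A minimal sketch of how the declared components would typically be wired to speech_to_speech follows; the gr.Interface call, its title, and launch() are assumptions and are not part of this commit:

# Assumed wiring (not shown in this diff): connect the declared components
# to speech_to_speech and start the app.
iface = gr.Interface(
    fn=speech_to_speech,
    inputs=[file_input, dropdown],
    outputs=audio_output,
    title="Speech-to-Speech Translation",  # hypothetical title
)
iface.launch()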