Update app.py
app.py CHANGED
@@ -3,8 +3,13 @@ import gradio as gr
 import json
 from transformers import pipeline
 
-# Load the translation pipeline
-text_translator = pipeline(
+# Load the translation pipeline with eager attention implementation
+text_translator = pipeline(
+    "translation",
+    model="facebook/nllb-200-distilled-600M",
+    torch_dtype=torch.bfloat16,
+    attn_implementation="eager"
+)
 
 # Load the JSON data for language codes
 with open('language.json', 'r') as file:
@@ -40,24 +45,30 @@ def translate_text(text, destination_language):
     if dest_code is None:
         return f"Error: Could not find FLORES code for language {destination_language}"
 
-
-
+    try:
+        translation = text_translator(text, src_lang="eng_Latn", tgt_lang=dest_code)
+        return translation[0]["translation_text"]
+    except Exception as e:
+        return f"Error during translation: {str(e)}"
 
 # Initialize the speech-to-text pipeline (Whisper model)
-#
-speech_to_text = pipeline("
+# Using the appropriate Whisper model for automatic speech recognition
+speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-small")
 
 # Function to transcribe audio to text
 def transcribe_audio(audio_file, destination_language):
-
-
-
-
-
-
-
-
-
+    try:
+        transcription_result = speech_to_text(audio_file)
+        print(f"Transcription result: {transcription_result}")  # Print the whole response to inspect
+        if "text" in transcription_result:
+            transcription = transcription_result["text"]
+        else:
+            return "Error: Unable to transcribe audio."
+
+        # Translate the transcribed text
+        return translate_text(transcription, destination_language)
+    except Exception as e:
+        return f"Error during transcription: {str(e)}"
 
 # Gradio interface
 with gr.Blocks() as demo:
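
For anyone who wants to exercise the new code paths outside the Space, below is a minimal local sketch; it is an illustration under stated assumptions, not part of app.py. It assumes transformers, torch, and ffmpeg are installed, and that `import torch` appears on an app.py line not shown in this diff (the added code references `torch.bfloat16`). The target code `fra_Latn` and the audio path `sample.wav` are hypothetical example inputs. The sketch also routes `attn_implementation` through `model_kwargs`, which `from_pretrained` accepts; the commit passes it to `pipeline()` directly, and whether that keyword is forwarded may depend on the installed transformers version.

# Sketch (not app.py): exercise the translation and speech-to-text pipelines added in this commit.
import torch
from transformers import pipeline

text_translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    torch_dtype=torch.bfloat16,
    model_kwargs={"attn_implementation": "eager"},  # the commit passes attn_implementation="eager" directly
)
speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-small")

# Text path, as translate_text() does: English source, FLORES-200 target code.
result = text_translator("Hello, how are you?", src_lang="eng_Latn", tgt_lang="fra_Latn")
print(result[0]["translation_text"])

# Audio path, as transcribe_audio() does: transcribe, then feed the text into the translator.
transcription = speech_to_text("sample.wav")  # hypothetical audio file
print(transcription["text"])

The ASR pipeline returns a dict with a "text" key, so the `if "text" in transcription_result:` guard added in transcribe_audio should normally take the success branch.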