ANASAKHTAR committed
Commit a45435e · verified · 1 parent: 0581182

Update app.py

Files changed (1)
  1. app.py +26 -15
app.py CHANGED
@@ -3,8 +3,13 @@ import gradio as gr
 import json
 from transformers import pipeline

-# Load the translation pipeline
-text_translator = pipeline("translation", model="facebook/nllb-200-distilled-600M", torch_dtype=torch.bfloat16)
+# Load the translation pipeline with eager attention implementation
+text_translator = pipeline(
+    "translation",
+    model="facebook/nllb-200-distilled-600M",
+    torch_dtype=torch.bfloat16,
+    attn_implementation="eager"
+)

 # Load the JSON data for language codes
 with open('language.json', 'r') as file:
@@ -40,24 +45,30 @@ def translate_text(text, destination_language):
     if dest_code is None:
         return f"Error: Could not find FLORES code for language {destination_language}"

-    translation = text_translator(text, src_lang="eng_Latn", tgt_lang=dest_code)
-    return translation[0]["translation_text"]
+    try:
+        translation = text_translator(text, src_lang="eng_Latn", tgt_lang=dest_code)
+        return translation[0]["translation_text"]
+    except Exception as e:
+        return f"Error during translation: {str(e)}"

 # Initialize the speech-to-text pipeline (Whisper model)
-# Use a pipeline as a high-level helper
-speech_to_text = pipeline("text-to-audio", model="facebook/musicgen-small")
+# Using the appropriate Whisper model for automatic speech recognition
+speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-small")

 # Function to transcribe audio to text
 def transcribe_audio(audio_file, destination_language):
-    transcription_result = speech_to_text(audio_file)
-    print(f"Transcription result: {transcription_result}")  # Print the whole response to inspect
-    if "text" in transcription_result:
-        transcription = transcription_result["text"]
-    else:
-        return "Error: Unable to transcribe audio."
-
-    return translate_text(transcription, destination_language)
-
+    try:
+        transcription_result = speech_to_text(audio_file)
+        print(f"Transcription result: {transcription_result}")  # Print the whole response to inspect
+        if "text" in transcription_result:
+            transcription = transcription_result["text"]
+        else:
+            return "Error: Unable to transcribe audio."
+
+        # Translate the transcribed text
+        return translate_text(transcription, destination_language)
+    except Exception as e:
+        return f"Error during transcription: {str(e)}"

 # Gradio interface
 with gr.Blocks() as demo:
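
For reference, a minimal sketch (not part of the commit) of how the two updated pipelines behave on their own. It assumes torch, transformers, and the two models above are available; the audio path "sample.wav" and the French target code "fra_Latn" are placeholder choices.

import torch
from transformers import pipeline

# Same pipelines as in the updated app.py (sketch; "sample.wav" is a placeholder file).
translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    torch_dtype=torch.bfloat16,
)
speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-small")

# Whisper's ASR pipeline returns a dict with a "text" key, which is why
# transcribe_audio() checks `if "text" in transcription_result`.
transcription = speech_to_text("sample.wav")
print(transcription)  # e.g. {'text': '...'}

# The NLLB translation pipeline returns a list of dicts with a "translation_text" key.
result = translator(transcription["text"], src_lang="eng_Latn", tgt_lang="fra_Latn")
print(result[0]["translation_text"])

This mirrors the shapes the updated translate_text() and transcribe_audio() functions rely on: a dict from the ASR pipeline and a list of dicts from the translation pipeline.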