mr2along committed on
Commit
e60d8bb
·
verified ·
1 Parent(s): 2e5665b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -28
app.py CHANGED
@@ -17,27 +17,6 @@ if not os.path.exists('audio'):
17
  # Initialize the epitran object for English
18
  epi = epitran.Epitran('eng-Latn')
19
 
20
- # Step 2: Create pronunciation audio for incorrect words
21
- def upfilepath(local_filename):
22
- ts = time.time()
23
- upload_url = f"https://mr2along-speech-recognize.hf.space/gradio_api/upload?upload_id={ts}"
24
- files = {'files': open(local_filename, 'rb')}
25
-
26
- try:
27
- response = requests.post(upload_url, files=files, timeout=30) # Set timeout (e.g., 30 seconds)
28
-
29
- if response.status_code == 200:
30
- result = response.json()
31
- extracted_path = result[0]
32
- return extracted_path
33
- else:
34
- return None
35
-
36
- except requests.exceptions.Timeout:
37
- return "Request timed out. Please try again."
38
- except Exception as e:
39
- return f"An error occurred: {e}"
40
-
41
  # Step 1: Transcribe the audio file
42
  def transcribe_audio(audio):
43
  if audio is None:
@@ -72,7 +51,7 @@ def transcribe_audio(audio):
72
  except sr.RequestError as e:
73
  return f"Error with Google Speech Recognition service: {e}"
74
 
75
- # Step 2: Create pronunciation audio for incorrect words (locally)
76
  def create_pronunciation_audio(word):
77
  try:
78
  tts = gTTS(word)
@@ -110,7 +89,7 @@ def phonetic_respelling(sentence):
110
  def ipa_transcription(sentence):
111
  return epi.transliterate(sentence)
112
 
113
- # Step 3: Compare the transcribed text with the input paragraph
114
  def compare_texts(reference_text, transcribed_text):
115
  reference_words = reference_text.split()
116
  transcribed_words = transcribed_text.split()
@@ -161,13 +140,16 @@ def compare_texts(reference_text, transcribed_text):
161
  suggestion = difflib.get_close_matches(word, reference_words, n=1)
162
  suggestion_text = f" (Did you mean: <em>{suggestion[0]}</em>?)" if suggestion else ""
163
  up_audio = upfilepath(audio)
164
- audio_src = f"https://mr2along-speech-recognize.hf.space/gradio_api/file={up_audio}"
165
- html_output += f'{word}: '
166
- html_output += f'<audio controls><source src="{audio_src}" type="audio/mpeg">Your browser does not support the audio tag.</audio>{suggestion_text}<br>'
 
 
 
167
 
168
  return [html_output]
169
 
170
- # Step 4: Text-to-Speech Function
171
  def text_to_speech(paragraph):
172
  if not paragraph:
173
  return None # Handle the case when no text is provided
@@ -186,7 +168,7 @@ def gradio_function(paragraph, audio):
186
 
187
  # Return comparison result
188
  return comparison_result
189
-
190
  # Gradio Interface using the updated API
191
  interface = gr.Interface(
192
  fn=gradio_function,
 
17
  # Initialize the epitran object for English
18
  epi = epitran.Epitran('eng-Latn')
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  # Step 1: Transcribe the audio file
21
  def transcribe_audio(audio):
22
  if audio is None:
 
51
  except sr.RequestError as e:
52
  return f"Error with Google Speech Recognition service: {e}"
53
 
54
+ # Function for creating pronunciation audio for incorrect words
55
  def create_pronunciation_audio(word):
56
  try:
57
  tts = gTTS(word)
 
89
  def ipa_transcription(sentence):
90
  return epi.transliterate(sentence)
91
 
92
+ # Step 2: Compare the transcribed text with the input paragraph
93
  def compare_texts(reference_text, transcribed_text):
94
  reference_words = reference_text.split()
95
  transcribed_words = transcribed_text.split()
 
140
  suggestion = difflib.get_close_matches(word, reference_words, n=1)
141
  suggestion_text = f" (Did you mean: <em>{suggestion[0]}</em>?)" if suggestion else ""
142
  up_audio = upfilepath(audio)
143
+ if up_audio:
144
+ audio_src = f"https://mr2along-speech-recognize.hf.space/gradio_api/file={up_audio}"
145
+ html_output += f'{word}: '
146
+ html_output += f'<audio controls><source src="{audio_src}" type="audio/mpeg">Your browser does not support the audio tag.</audio>{suggestion_text}<br>'
147
+ else:
148
+ html_output += f'{word}: <span style="color: red;">Audio not available.</span><br>'
149
 
150
  return [html_output]
151
 
152
+ # Step 3: Text-to-Speech Function
153
  def text_to_speech(paragraph):
154
  if not paragraph:
155
  return None # Handle the case when no text is provided
 
168
 
169
  # Return comparison result
170
  return comparison_result
171
+
172
  # Gradio Interface using the updated API
173
  interface = gr.Interface(
174
  fn=gradio_function,