Spaces (status: Sleeping) — Commit e75d9d3 · Parent: 68dc913 — "Update app.py" (Browse files)
File changed: app.py
@@ -2,7 +2,6 @@ import streamlit as st
 from transformers import pipeline
 from gtts import gTTS
 import speech_recognition as sr
-import pyaudio

 # Create a translation pipeline
 pipe = pipeline('translation', model='Helsinki-NLP/opus-mt-en-hi')
@@ -13,28 +12,17 @@ recognizer = sr.Recognizer()
 # Create a Streamlit input element for microphone input
 audio_input = st.empty()

+# Check if the microphone input is requested
 if st.checkbox("Use Microphone for English Input"):
     with audio_input:
         st.warning("Listening for audio input... Speak in English.")
         try:
-            # Use PyAudio for microphone input
-            with pa.open(format=pyaudio.paInt16,
-                         channels=1,
-                         rate=44100,
-                         input=True,
-                         frames_per_buffer=1024) as stream:
-                audio_data = stream.read(44100)  # Adjust the number of frames as needed
-
-            # Close the PyAudio object
-            pa.terminate()
-
+            with sr.Microphone() as source:
+                audio = recognizer.listen(source)
             st.success("Audio input recorded. Translating...")

             # Recognize the English speech
-            english_text = recognizer.recognize_google(
+            english_text = recognizer.recognize_google(audio, language='en')

             # Translate the English text to Hindi
             out = pipe(english_text, src_lang='en', tgt_lang='hi')
@@ -44,9 +32,11 @@ if st.checkbox("Use Microphone for English Input"):
             st.text(f"English Input: {english_text}")
             st.text(f"Hindi Translation: {translation_text}")

+            # Convert the translated text to speech
             tts = gTTS(translation_text, lang='hi')
             tts.save("translated_audio.mp3")

+            # Display the audio player for listening to the speech
             st.audio("translated_audio.mp3", format='audio/mp3')

         except sr.WaitTimeoutError:
Resulting app.py after the commit (excerpt, lines 2–42):

from transformers import pipeline
from gtts import gTTS
import speech_recognition as sr

# Create a translation pipeline
pipe = pipeline('translation', model='Helsinki-NLP/opus-mt-en-hi')

# Create a Streamlit input element for microphone input
audio_input = st.empty()

# Check if the microphone input is requested
if st.checkbox("Use Microphone for English Input"):
    with audio_input:
        st.warning("Listening for audio input... Speak in English.")
        try:
            with sr.Microphone() as source:
                audio = recognizer.listen(source)
            st.success("Audio input recorded. Translating...")

            # Recognize the English speech
            english_text = recognizer.recognize_google(audio, language='en')

            # Translate the English text to Hindi
            out = pipe(english_text, src_lang='en', tgt_lang='hi')

            st.text(f"English Input: {english_text}")
            st.text(f"Hindi Translation: {translation_text}")

            # Convert the translated text to speech
            tts = gTTS(translation_text, lang='hi')
            tts.save("translated_audio.mp3")

            # Display the audio player for listening to the speech
            st.audio("translated_audio.mp3", format='audio/mp3')

        except sr.WaitTimeoutError:
|