Update app.py
app.py CHANGED

@@ -1,3 +1,4 @@
+import gradio as gr
 import speech_recognition as sr
 from googletrans import Translator
 from textblob import TextBlob
@@ -6,7 +7,6 @@ from nltk.tokenize import word_tokenize
 from nltk.corpus import stopwords
 from nltk.stem import WordNetLemmatizer
 from gtts import gTTS
-import gradio as gr
 import tempfile
 import os
 
@@ -34,7 +34,7 @@ def text_to_speech(text):
     tts = gTTS(text)
     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
         tts.save(fp.name)
-
+        return fp.name
 
 def process_input(input_text, input_audio, feature, target_language, output_language):
     if input_audio is not None:
@@ -75,7 +75,7 @@ def tts_function(text):
     return None
 
 # Create Gradio interface
-with gr.Blocks() as
+with gr.Blocks() as demo:
     gr.Markdown("# The Advanced Multi-Faceted Chatbot")
     gr.Markdown("Enter text or speak to interact with the chatbot. Choose a feature and specify languages for translation if needed.")
 
@@ -85,8 +85,8 @@ with gr.Blocks() as iface:
 
     with gr.Row():
         feature = gr.Radio(["Translation", "Voice Command", "Transcription"], label="Feature")
-        target_language = gr.Textbox(label="Target Language")
-        output_language = gr.Textbox(label="Output Language")
+        target_language = gr.Textbox(label="Target Language ")
+        output_language = gr.Textbox(label="Output Language ")
 
     submit_button = gr.Button("Process")
     result_text = gr.Textbox(label="Result")
@@ -98,7 +98,7 @@ with gr.Blocks() as iface:
         inputs=[input_text, input_audio, feature, target_language, output_language],
         outputs=[result_text, audio_output]
     )
-
+
     tts_button.click(
         tts_function,
         inputs=[result_text],
@@ -106,4 +106,4 @@ with gr.Blocks() as iface:
     )
 
 # Launch the interface
-
+demo.launch()
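For context, below is a minimal, self-contained sketch of how the changed pieces presumably fit together after this commit: text_to_speech now returns the path of the temporary MP3, the truncated "with gr.Blocks() as" line becomes a complete "with gr.Blocks() as demo:" block, and demo.launch() starts the app. The bodies of process_input and tts_function, and the definitions of input_text, input_audio, audio_output, and tts_button, are not visible in this diff, so the stubs and component definitions marked "assumed" are illustrative placeholders, not the Space's actual code.

import tempfile

import gradio as gr
from gtts import gTTS


def text_to_speech(text):
    # Per this commit, the helper now returns the temp MP3 path so a
    # gr.Audio output can play it.
    tts = gTTS(text)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
        tts.save(fp.name)
        return fp.name


def process_input(input_text, input_audio, feature, target_language, output_language):
    # Placeholder body: the real logic (translation, voice command,
    # transcription) is outside the hunks shown in this diff.
    return f"[{feature}] {input_text}", None


def tts_function(text):
    # Placeholder body: presumably wraps text_to_speech and returns None
    # for empty input, matching the "return None" context line above.
    return text_to_speech(text) if text else None


with gr.Blocks() as demo:
    gr.Markdown("# The Advanced Multi-Faceted Chatbot")
    gr.Markdown("Enter text or speak to interact with the chatbot. Choose a feature and specify languages for translation if needed.")

    input_text = gr.Textbox(label="Input Text")    # assumed: defined outside the shown hunks
    input_audio = gr.Audio(label="Input Audio")    # assumed: defined outside the shown hunks

    with gr.Row():
        feature = gr.Radio(["Translation", "Voice Command", "Transcription"], label="Feature")
        target_language = gr.Textbox(label="Target Language")
        output_language = gr.Textbox(label="Output Language")

    submit_button = gr.Button("Process")
    result_text = gr.Textbox(label="Result")
    audio_output = gr.Audio(label="Audio Output")  # assumed: defined outside the shown hunks
    tts_button = gr.Button("Speak Result")         # assumed: defined outside the shown hunks

    submit_button.click(
        process_input,
        inputs=[input_text, input_audio, feature, target_language, output_language],
        outputs=[result_text, audio_output],
    )
    tts_button.click(
        tts_function,
        inputs=[result_text],
        outputs=[audio_output],                    # assumed output target
    )

# Launch the interface
demo.launch()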