RTHGV committed on
Commit e6bead9 · verified · 1 Parent(s): 646ecd1

Update app.py

Files changed (1):
  app.py +68 -15
app.py CHANGED
@@ -1,9 +1,14 @@
-import gradio as gr
+import speech_recognition as sr
 from googletrans import Translator
+from textblob import TextBlob
 import nltk
 from nltk.tokenize import word_tokenize
 from nltk.corpus import stopwords
 from nltk.stem import WordNetLemmatizer
+from gtts import gTTS
+import gradio as gr
+import tempfile
+import os
 
 # Download necessary NLTK data
 nltk.download('punkt', quiet=True)
@@ -11,6 +16,7 @@ nltk.download('stopwords', quiet=True)
 nltk.download('wordnet', quiet=True)
 
 # Initialize components
+recognizer = sr.Recognizer()
 translator = Translator()
 
 def natural_language_understanding(text):
@@ -24,33 +30,80 @@ def translate_text(text, target_language):
     translated = translator.translate(text, dest=target_language)
     return translated.text
 
-def process_input(input_text, feature, target_language):
+def text_to_speech(text):
+    tts = gTTS(text)
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
+        tts.save(fp.name)
+    return fp.name
+
+def process_input(input_text, input_audio, feature, target_language, output_language):
+    if input_audio is not None:
+        # Process audio input
+        try:
+            with sr.AudioFile(input_audio) as source:
+                audio = recognizer.record(source)
+            input_text = recognizer.recognize_google(audio)
+        except sr.UnknownValueError:
+            return "Could not understand audio", None
+        except sr.RequestError:
+            return "Could not request results from speech recognition service", None
+        except Exception as e:
+            return f"An error occurred: {str(e)}", None
+
     if not input_text:
-        return "No input provided"
+        return "No input provided", None
 
     processed_text = natural_language_understanding(input_text)
 
     if feature == "Translation":
         result = translate_text(processed_text, target_language)
+    elif feature == "Voice Command":
+        result = "Voice command feature not implemented in this example"
     elif feature == "Transcription":
         result = processed_text
     else:
         result = "Invalid feature selected"
 
-    return result
+    if output_language:
+        result = translate_text(result, output_language)
+
+    return result, None
+
+def tts_function(text):
+    if text:
+        return text_to_speech(text)
+    return None
 
 # Create Gradio interface
-iface = gr.Interface(
-    fn=process_input,
-    inputs=[
-        gr.Textbox(label="Input Text"),
-        gr.Radio(["Translation", "Transcription"], label="Feature"),
-        gr.Textbox(label="Target Language (for translation)")
-    ],
-    outputs=gr.Textbox(label="Result"),
-    title="Simple Multi-Faceted Chatbot",
-    description="Enter text, choose a feature, and specify a target language for translation if needed."
-)
+with gr.Blocks() as iface:
+    gr.Markdown("# The Advanced Multi-Faceted Chatbot")
+    gr.Markdown("Enter text or speak to interact with the chatbot. Choose a feature and specify languages for translation if needed.")
+
+    with gr.Row():
+        input_text = gr.Textbox(label="Input Text")
+        input_audio = gr.Audio(label="Input Audio", type="filepath")
+
+    with gr.Row():
+        feature = gr.Radio(["Translation", "Voice Command", "Transcription"], label="Feature")
+        target_language = gr.Textbox(label="Target Language")
+        output_language = gr.Textbox(label="Output Language")
+
+    submit_button = gr.Button("Process")
+    result_text = gr.Textbox(label="Result")
+    tts_button = gr.Button("Convert to Speech")
+    audio_output = gr.Audio(label="Audio Output")
+
+    submit_button.click(
+        process_input,
+        inputs=[input_text, input_audio, feature, target_language, output_language],
+        outputs=[result_text, audio_output]
+    )
+
+    tts_button.click(
+        tts_function,
+        inputs=[result_text],
+        outputs=[audio_output]
+    )
 
 # Launch the interface
 iface.launch(inline = False)
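
For context, a minimal sketch of how the updated functions could be exercised directly, outside the Gradio UI. It assumes the definitions from the new app.py are in scope and that googletrans and gTTS can reach their online services; the example inputs are hypothetical.

    # Text-only path: no audio input, translate to French, no second output translation.
    result, _ = process_input(
        input_text="Hello, how are you?",   # hypothetical input text
        input_audio=None,
        feature="Translation",
        target_language="fr",
        output_language="",                 # falsy, so no extra translation pass
    )
    print(result)

    # Convert the result to speech: text_to_speech writes a temporary .mp3
    # and tts_function returns its file path (or None for empty text).
    audio_path = tts_function(result)
    print(audio_path)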