RTHGV committed on
Commit
ab41f33
·
verified ·
1 Parent(s): 6bcffa7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -61
app.py CHANGED
@@ -1,24 +1,15 @@
1
  import gradio as gr
2
- import speech_recognition as sr
3
- from googletrans import Translator
4
- from textblob import TextBlob
5
  import nltk
6
  from nltk.tokenize import word_tokenize
7
  from nltk.corpus import stopwords
8
  from nltk.stem import WordNetLemmatizer
9
- from gtts import gTTS
10
- import tempfile
11
- import os
12
 
13
  # Download necessary NLTK data
14
  nltk.download('punkt', quiet=True)
15
  nltk.download('stopwords', quiet=True)
16
  nltk.download('wordnet', quiet=True)
17
 
18
- # Initialize components
19
- recognizer = sr.Recognizer()
20
- translator = Translator()
21
-
22
  def natural_language_understanding(text):
23
  tokens = word_tokenize(text.lower())
24
  stop_words = set(stopwords.words('english'))
@@ -27,82 +18,42 @@ def natural_language_understanding(text):
27
  return " ".join(processed_tokens)
28
 
29
  def translate_text(text, target_language):
30
- translated = translator.translate(text, dest=target_language)
31
- return translated.text
32
-
33
- def text_to_speech(text):
34
- tts = gTTS(text)
35
- with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
36
- tts.save(fp.name)
37
- return fp.name
38
-
39
- def process_input(input_text, input_audio, feature, target_language, output_language):
40
- if input_audio is not None:
41
- # Process audio input
42
- try:
43
- with sr.AudioFile(input_audio) as source:
44
- audio = recognizer.record(source)
45
- input_text = recognizer.recognize_google(audio)
46
- except sr.UnknownValueError:
47
- return "Could not understand audio", None
48
- except sr.RequestError:
49
- return "Could not request results from speech recognition service", None
50
- except Exception as e:
51
- return f"An error occurred: {str(e)}", None
52
 
 
53
  if not input_text:
54
- return "No input provided", None
55
 
56
  processed_text = natural_language_understanding(input_text)
57
 
58
  if feature == "Translation":
59
  result = translate_text(processed_text, target_language)
60
- elif feature == "Voice Command":
61
- result = "Voice command feature not implemented in this example"
62
  elif feature == "Transcription":
63
  result = processed_text
64
  else:
65
  result = "Invalid feature selected"
66
 
67
- if output_language:
68
- result = translate_text(result, output_language)
69
-
70
- return result, None
71
-
72
- def tts_function(text):
73
- if text:
74
- return text_to_speech(text)
75
- return None
76
 
77
  # Create Gradio interface
78
  with gr.Blocks() as demo:
79
  gr.Markdown("# The Advanced Multi-Faceted Chatbot")
80
- gr.Markdown("Enter text or speak to interact with the chatbot. Choose a feature and specify languages for translation if needed.")
81
 
82
- with gr.Row():
83
- input_text = gr.Textbox(label="Input Text")
84
- input_audio = gr.Audio(label="Input Audio", type="filepath")
85
 
86
  with gr.Row():
87
- feature = gr.Radio(["Translation", "Voice Command", "Transcription"], label="Feature")
88
- target_language = gr.Textbox(label="Target Language ")
89
- output_language = gr.Textbox(label="Output Language ")
90
 
91
  submit_button = gr.Button("Process")
92
  result_text = gr.Textbox(label="Result")
93
- tts_button = gr.Button("Convert to Speech")
94
- audio_output = gr.Audio(label="Audio Output")
95
 
96
  submit_button.click(
97
  process_input,
98
- inputs=[input_text, input_audio, feature, target_language, output_language],
99
- outputs=[result_text, audio_output]
100
- )
101
-
102
- tts_button.click(
103
- tts_function,
104
- inputs=[result_text],
105
- outputs=[audio_output]
106
  )
107
 
108
  # Launch the interface
 
1
  import gradio as gr
2
+ from deep_translator import GoogleTranslator
 
 
3
  import nltk
4
  from nltk.tokenize import word_tokenize
5
  from nltk.corpus import stopwords
6
  from nltk.stem import WordNetLemmatizer
 
 
 
7
 
8
# Fetch the NLTK corpora the app depends on (quiet: no console spam on startup).
for _corpus in ("punkt", "stopwords", "wordnet"):
    nltk.download(_corpus, quiet=True)
12
 
 
 
 
 
13
  def natural_language_understanding(text):
14
  tokens = word_tokenize(text.lower())
15
  stop_words = set(stopwords.words('english'))
 
18
  return " ".join(processed_tokens)
19
 
20
def translate_text(text, target_language):
    """Return *text* rendered in *target_language*.

    The source language is auto-detected by deep-translator's
    GoogleTranslator backend; *target_language* is a language code
    such as 'fr'.
    """
    return GoogleTranslator(source='auto', target=target_language).translate(text)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
def process_input(input_text, feature, target_language):
    """Run the selected feature over *input_text* and return a result string.

    Parameters
    ----------
    input_text : str
        Raw user text; empty or None yields an explanatory message.
    feature : str
        "Translation" or "Transcription"; anything else is rejected.
    target_language : str
        Language code for translation (e.g. 'fr'); ignored for transcription.

    Returns
    -------
    str
        The processed/translated text, or a human-readable error message.
    """
    if not input_text:
        return "No input provided"

    processed_text = natural_language_understanding(input_text)

    if feature == "Translation":
        # Guard: an empty code would make the translator raise deep inside
        # deep-translator and crash the Gradio request.
        if not target_language:
            return "No target language specified"
        # Translation goes over the network; surface failures as text in the
        # UI instead of an unhandled exception.
        try:
            result = translate_text(processed_text, target_language)
        except Exception as exc:
            result = f"Translation failed: {exc}"
    elif feature == "Transcription":
        result = processed_text
    else:
        result = "Invalid feature selected"

    return result
 
 
 
 
 
 
 
 
38
 
39
# Create Gradio interface: one text box in, one text box out, with a radio
# control choosing between translation and transcription.
with gr.Blocks() as demo:
    gr.Markdown("# The Advanced Multi-Faceted Chatbot")
    gr.Markdown("Enter text to interact with the chatbot. Choose a feature and specify language for translation if needed.")

    input_text = gr.Textbox(label="Input Text")

    with gr.Row():
        feature = gr.Radio(["Translation", "Transcription"], label="Feature")
        target_language = gr.Textbox(label="Target Language (e.g., 'fr' for French)")

    submit_button = gr.Button("Process")
    result_text = gr.Textbox(label="Result")

    # Wire the button to the processing function.
    submit_button.click(
        fn=process_input,
        inputs=[input_text, feature, target_language],
        outputs=result_text,
    )
58
 
59
  # Launch the interface