DreamStream-1 committed on
Commit ff908a7 · verified · 1 Parent(s): 2af6061

Update app.py

Files changed (1)
app.py +64 -34
app.py CHANGED
@@ -14,6 +14,7 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipe
 from nltk.tokenize import word_tokenize
 from nltk.stem.lancaster import LancasterStemmer
 import os
+from functools import lru_cache
 
 # Ensure necessary NLTK resources are downloaded
 nltk.download('punkt')
@@ -22,32 +23,29 @@ nltk.download('punkt')
 stemmer = LancasterStemmer()
 
 # Load intents.json
-try:
-    with open("intents.json") as file:
-        data = json.load(file)
-except FileNotFoundError:
-    raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")
+def load_intents(file_path):
+    with open(file_path) as file:
+        return json.load(file)
 
 # Load preprocessed data from pickle
-try:
-    with open("data.pickle", "rb") as f:
-        words, labels, training, output = pickle.load(f)
-except FileNotFoundError:
-    raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")
+def load_preprocessed_data(file_path):
+    with open(file_path, "rb") as f:
+        return pickle.load(f)
 
 # Build the model structure
-net = tflearn.input_data(shape=[None, len(training[0])])
-net = tflearn.fully_connected(net, 8)
-net = tflearn.fully_connected(net, 8)
-net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
-net = tflearn.regression(net)
+def build_model(words, labels, training, output):
+    net = tflearn.input_data(shape=[None, len(training[0])])
+    net = tflearn.fully_connected(net, 8)
+    net = tflearn.fully_connected(net, 8)
+    net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
+    net = tflearn.regression(net)
+    return tflearn.DNN(net)
 
 # Load the trained model
-model = tflearn.DNN(net)
-try:
-    model.load("MentalHealthChatBotmodel.tflearn")
-except FileNotFoundError:
-    raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")
+def load_model(model_path, net):
+    model = tflearn.DNN(net)
+    model.load(model_path)
+    return model
 
 # Function to process user input into a bag-of-words format
 def bag_of_words(s, words):
@@ -61,7 +59,7 @@ def bag_of_words(s, words):
     return np.array(bag)
 
 # Chat function
-def chat(message, history):
+def chat(message, history, words, labels, model):
     history = history or []
     message = message.lower()
 
@@ -86,7 +84,6 @@ def chat(message, history):
     history.append((message, response))
     return history, history
 
-
 # Sentiment analysis setup
 tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
 model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
@@ -148,26 +145,30 @@ def detect_emotion(user_input):
 # Google Geocoding API setup to convert city name to latitude/longitude
 geocode_url = "https://maps.googleapis.com/maps/api/geocode/json"
 
+@lru_cache(maxsize=128)
 def get_lat_lon(location, api_key):
     params = {
         "address": location,
         "key": api_key
     }
-    response = requests.get(geocode_url, params=params)
-    if response.status_code == 200:
+    try:
+        response = requests.get(geocode_url, params=params)
+        response.raise_for_status()
         result = response.json()
         if result['status'] == 'OK':
-            # Return the first result's latitude and longitude
             location = result['results'][0]['geometry']['location']
             return location['lat'], location['lng']
-    return None, None
+        else:
+            return None, None
+    except requests.RequestException as e:
+        print(f"Error fetching coordinates: {e}")
+        return None, None
 
 # Get wellness professionals
 def get_wellness_professionals(location, api_key):
     query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner OR integrative medicine OR chiropractor OR naturopath"
     radius = 50000  # 50 km radius
 
-    # Get the latitude and longitude from the location input
    lat, lon = get_lat_lon(location, api_key)
 
    if lat is None or lon is None:
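A minimal, editor-added sketch (not part of the commit) of what the new @lru_cache(maxsize=128) decorator buys get_lat_lon above: repeated calls with the same (location, api_key) pair are answered from memory instead of re-hitting the Geocoding API. fake_geocode is a hypothetical stand-in for the real requests.get call.

    from functools import lru_cache

    calls = {"count": 0}

    @lru_cache(maxsize=128)
    def fake_geocode(location, api_key):
        calls["count"] += 1            # how often the "API" is actually hit
        return (51.5074, -0.1278)      # dummy coordinates

    fake_geocode("London", "KEY")      # miss: request issued
    fake_geocode("London", "KEY")      # hit: served from the cache
    fake_geocode("Paris", "KEY")       # new arguments: second request
    assert calls["count"] == 2
    print(fake_geocode.cache_info())   # hits=1, misses=2, maxsize=128, currsize=2

One side effect worth noting: since the decorated get_lat_lon catches request errors and returns (None, None), a failed lookup is memoized like any successful one.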
@@ -210,8 +211,8 @@ def generate_map(wellness_data):
     return map_file
 
 # Gradio interface setup for user interaction
-def user_interface(message, location, history, api_key):
-    history, history = chat(message, history)
+def user_interface(message, location, history, api_key, words, labels, model):
+    history, history = chat(message, history, words, labels, model)
 
     # Sentiment analysis
     inputs = tokenizer_sentiment(message, return_tensors="pt")
@@ -231,7 +232,24 @@ def user_interface(message, location, history, api_key):
     suggestions_df = pd.DataFrame(resources, columns=["Subject", "Article URL"])
     suggestions_df["Video URL"] = video_link  # Add video URL column
 
-    return history, history, sentiment, emotion, resources, video_link, map_file, suggestions_df.to_html(escape=False)
+    return history, history, sentiment, emotion, suggestions_df.to_html(escape=False), map_file
+
+# Load data and model
+try:
+    data = load_intents("intents.json")
+except FileNotFoundError:
+    raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")
+
+try:
+    words, labels, training, output = load_preprocessed_data("data.pickle")
+except FileNotFoundError:
+    raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")
+
+net = build_model(words, labels, training, output)
+try:
+    model = load_model("MentalHealthChatBotmodel.tflearn", net)
+except FileNotFoundError:
+    raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")
 
 # Gradio chatbot interface
 chatbot = gr.Chatbot(label="Mental Health Chatbot")
@@ -239,13 +257,25 @@ location_input = gr.Textbox(label="Enter your location (latitude,longitude)", pl
 
 # Gradio interface definition
 demo = gr.Interface(
-    user_interface,
-    [gr.Textbox(label="Message"), location_input, "state", "text"],
-    [chatbot, "state", "text", "text", "json", "text", "html", "html"],  # Added additional output for the map
+    fn=lambda message, location, history, api_key: user_interface(message, location, history, api_key, words, labels, model),
+    inputs=[
+        gr.Textbox(label="Message"),
+        location_input,
+        "state",
+        "text"  # API Key input
+    ],
+    outputs=[
+        chatbot,
+        "state",
+        gr.Textbox(label="Sentiment"),
+        gr.Textbox(label="Emotion"),
+        gr.HTML(label="Resources"),
+        gr.HTML(label="Map")
+    ],
     allow_flagging="never",
     title="Mental Health & Well-being Assistant"
 )
 
 # Launch Gradio interface
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
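The fn passed to gr.Interface above is an inline lambda that closes over words, labels, and model. As an editor-added sketch (the stub below is illustrative, not the app's real user_interface), functools.partial expresses the same binding without a lambda by fixing the trailing keyword arguments once:

    from functools import partial

    # Stand-in with the same parameter order as user_interface in this commit.
    def user_interface(message, location, history, api_key, words, labels, model):
        return f"{message!r} handled with {len(words)} words and {len(labels)} labels"

    words, labels, model = ["hi"], ["greeting"], object()

    # Bind the loaded resources once; Gradio would then supply only the first four arguments.
    handler = partial(user_interface, words=words, labels=labels, model=model)
    print(handler("hello", "London", [], "API_KEY"))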