Sephfox committed
Commit 89c18ff · verified · 1 Parent(s): c91e004

Update app.py

Files changed (1)
  1. app.py +30 -72
app.py CHANGED
@@ -20,12 +20,9 @@ import seaborn as sns
 
 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
 
-# Download necessary NLTK data
-nltk.download('vader_lexicon', quiet=True)
-nltk.download('punkt', quiet=True)
-nltk.download('averaged_perceptron_tagger', quiet=True)
-nltk.download('maxent_ne_chunker', quiet=True)
-nltk.download('words', quiet=True)
+# Download necessary NLTK data (lazy loading)
+nltk.download('vader_lexicon', quiet=True, raise_on_error=True)
+nltk.data.path.append('/nltk_data')  # Specify the path where NLTK data should be stored
 
 # Initialize Example Dataset (For Emotion Prediction)
 data = {
@@ -148,17 +145,7 @@ def evolve_emotions():
     best_individual = tools.selBest(population, k=1)[0]
     emotion_values = best_individual[:len(emotions)]
     intensities = best_individual[len(emotions):]
-
-    for i, (emotion, data) in enumerate(emotions.items()):
-        data['percentage'] = emotion_values[i]
-        data['intensity'] = intensities[i]
-
-    # Normalize percentages
-    total = sum(e['percentage'] for e in emotions.values())
-    for e in emotions:
-        emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
-
-def predict_emotion(context):
+def predict_emotion(context):
     load_models()
     inputs = emotion_prediction_tokenizer(context, return_tensors="pt", truncation=True, max_length=512)
     with torch.no_grad():
@@ -174,6 +161,22 @@ def sentiment_analysis(text):
     return sentiment_scores
 
 def extract_entities(text):
+    # Lazy load the necessary NLTK data
+    try:
+        nltk.data.find('tokenizers/punkt')
+    except LookupError:
+        nltk.download('punkt', quiet=True, raise_on_error=True)
+
+    try:
+        nltk.data.find('taggers/averaged_perceptron_tagger')
+    except LookupError:
+        nltk.download('averaged_perceptron_tagger', quiet=True, raise_on_error=True)
+
+    try:
+        nltk.data.find('chunkers/maxent_ne_chunker')
+    except LookupError:
+        nltk.download('maxent_ne_chunker', quiet=True, raise_on_error=True)
+
     chunked = ne_chunk(pos_tag(word_tokenize(text)))
     entities = []
     for chunk in chunked:
@@ -266,68 +269,23 @@ def interactive_interface(input_text):
     if len(conversation_history) > max_history_length:
         conversation_history.pop(0)
 
+    # Save conversation history to a file
+    save_historical_data(conversation_history)
+
     # Return the expected outputs in the correct order
     return (
         gr.Textbox(value=predicted_emotion, label="Predicted Emotion"),
         gr.Textbox(value=str(sentiment_scores), label="Sentiment Scores"),
         gr.Textbox(value=str(entities), label="Extracted Entities"),
         gr.Textbox(value=str(text_complexity), label="Text Complexity"),
-        gr.Textbox(value=ai_emotion, label="AI Emotion"),
-        gr.Textbox(value=f"{ai_emotion_percentage:.2f}%", label="AI Emotion Percentage"),
-        gr.Textbox(value=f"{ai_emotion_intensity:.2f}", label="AI Emotion Intensity"),
-        gr.Image(value=emotion_visualization, label="Emotion Visualization"),
-        gr.Textbox(value=response, label="AI Response")
+        gr.Textbox(value=response, label="AI Response"),
+        gr.Image(value=emotion_visualization, label="Emotion Visualization")
     )
 
-# 443 additional features
-additional_features = {}
-for i in range(443):
-    additional_features[f'feature_{i+1}'] = 0
-
-def feature_transformations():
-    global additional_features
-    for feature in additional_features:
-        additional_features[feature] += random.uniform(-1, 1)
-
-def visualize_emotions():
-    emotions_df = pd.DataFrame([(e, d['percentage'], d['intensity']) for e, d in emotions.items()],
-                               columns=['emotion', 'percentage', 'intensity'])
-
-    plt.figure(figsize=(12, 6))
-    sns.barplot(x='emotion', y='percentage', data=emotions_df)
-    plt.title('Emotion Percentages')
-    plt.xlabel('Emotion')
-    plt.ylabel('Percentage')
-    plt.xticks(rotation=90)
-    plt.savefig('emotion_percentages.png')
-
-    plt.figure(figsize=(12, 6))
-    sns.barplot(x='emotion', y='intensity', data=emotions_df)
-    plt.title('Emotion Intensities')
-    plt.xlabel('Emotion')
-    plt.ylabel('Intensity')
-    plt.xticks(rotation=90)
-    plt.savefig('emotion_intensities.png')
-
-    return 'emotion_percentages.png', 'emotion_intensities.png'
-
 # Create the Gradio interface
-iface = gr.Interface(
-    fn=interactive_interface,
-    inputs=gr.Textbox(label="Input Text"),
-    outputs=[
-        gr.Textbox(label="Predicted Emotion"),
-        gr.Textbox(label="Sentiment Scores"),
-        gr.Textbox(label="Extracted Entities"),
-        gr.Textbox(label="Text Complexity"),
-        gr.Textbox(label="AI Emotion"),
-        gr.Textbox(label="AI Emotion Percentage"),
-        gr.Textbox(label="AI Emotion Intensity"),
-        gr.Image(label="Emotion Visualization"),
-        gr.Textbox(label="AI Response")
-    ],
-    title="Emotional AI Assistant",
-    description="An AI assistant that can analyze the emotional content of text and generate responses based on its emotional state.",
-)
+iface = gr.Interface(fn=interactive_interface, inputs="text", outputs=[
+    "text", "text", "text", "text", "text", "image"
+], title="Emotion-Aware AI Assistant")
 
-iface.launch()
+# Launch the interface
+iface.launch()
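
Note on the lazy-loading change: the three try/except blocks added to extract_entities repeat the same find-then-download step for each NLTK resource. A small helper along the lines below would express that pattern once; this is only a sketch, and ensure_nltk_resource is a hypothetical name, not something the commit defines.

import nltk

def ensure_nltk_resource(resource_path, package):
    # Hypothetical helper: fetch an NLTK package only if its data is not already installed.
    try:
        nltk.data.find(resource_path)
    except LookupError:
        nltk.download(package, quiet=True, raise_on_error=True)

# The same three resources the commit checks inside extract_entities:
ensure_nltk_resource('tokenizers/punkt', 'punkt')
ensure_nltk_resource('taggers/averaged_perceptron_tagger', 'averaged_perceptron_tagger')
ensure_nltk_resource('chunkers/maxent_ne_chunker', 'maxent_ne_chunker')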
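
Note on the simplified Gradio wiring: the new one-line gr.Interface uses Gradio's string shortcuts ("text" for a Textbox, "image" for an Image), so the six values returned by interactive_interface must line up positionally with the six entries in outputs. A minimal, self-contained sketch of that wiring, with placeholder values standing in for the app's real pipeline (assumes Gradio 3.x or later):

import gradio as gr

def analyze(text):
    # Placeholder outputs, returned in the same order as the outputs list below.
    predicted_emotion = "joy"
    sentiment_scores = "{'pos': 0.8, 'neg': 0.0}"
    entities = "[]"
    text_complexity = "42.0"
    response = "Hello there!"
    emotion_visualization = None  # an image filepath or array would go here
    return predicted_emotion, sentiment_scores, entities, text_complexity, response, emotion_visualization

iface = gr.Interface(
    fn=analyze,
    inputs="text",
    outputs=["text", "text", "text", "text", "text", "image"],  # order must match the return tuple
    title="Emotion-Aware AI Assistant",
)

if __name__ == "__main__":
    iface.launch()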