Sephfox committed on
Commit 1ebd803 · verified · 1 Parent(s): ce3343c

Update app.py

Files changed (1)
  1. app.py +107 -116
app.py CHANGED
@@ -144,34 +144,60 @@ def evolve_emotions():
      intensities = best_individual[len(emotions):]

      for i, (emotion, data) in enumerate(emotions.items()):
-         data['percentage'] = emotion_values[i]
          data['intensity'] = intensities[i]

-     # Normalize percentages
      total = sum(e['percentage'] for e in emotions.values())
      for e in emotions:
          emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100

- def update_emotion_history(emotion, percentage, intensity, context):
-     entry = {
-         'emotion': emotion,
-         'percentage': percentage,
-         'intensity': intensity,
-         'context': context,
-         'timestamp': pd.Timestamp.now().isoformat()
-     }
-     emotion_history.append(entry)
-     save_historical_data(emotion_history)

- # Adding 443 features
- additional_features = {}
- for i in range(443):
-     additional_features[f'feature_{i+1}'] = 0

- def feature_transformations():
-     global additional_features
-     for feature in additional_features:
-         additional_features[feature] += random.uniform(-1, 1)

  def generate_response(input_text, ai_emotion, conversation_history):
      # Prepare a prompt based on the current emotion and input
@@ -209,118 +235,83 @@ def generate_response(input_text, ai_emotion, conversation_history):
      response = response.split("AI:")[-1].strip()
      return response

- def predict_emotion(context):
-     inputs = emotion_prediction_tokenizer(context, return_tensors="pt", truncation=True, max_length=512)
-     with torch.no_grad():
-         outputs = emotion_prediction_model(**inputs)
-     probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
-     predicted_class = torch.argmax(probabilities, dim=-1).item()
-     emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
-     return emotion_labels[predicted_class]
-
- def sentiment_analysis(text):
-     sia = SentimentIntensityAnalyzer()
-     sentiment_scores = sia.polarity_scores(text)
-     return sentiment_scores
-
- def extract_entities(text):
-     chunked = ne_chunk(pos_tag(word_tokenize(text)))
-     entities = []
-     for chunk in chunked:
-         if hasattr(chunk, 'label'):
-             entities.append(((' '.join(c[0] for c in chunk)), chunk.label()))
-     return entities

- def analyze_text_complexity(text):
-     blob = TextBlob(text)
-     return {
-         'word_count': len(blob.words),
-         'sentence_count': len(blob.sentences),
-         'average_sentence_length': len(blob.words) / len(blob.sentences) if len(blob.sentences) > 0 else 0,
-         'polarity': blob.sentiment.polarity,
-         'subjectivity': blob.sentiment.subjectivity
-     }

  def visualize_emotions():
      emotions_df = pd.DataFrame([(e, d['percentage'], d['intensity']) for e, d in emotions.items()],
-                                columns=['Emotion', 'Percentage', 'Intensity'])
-
      plt.figure(figsize=(12, 6))
-     sns.barplot(x='Emotion', y='Percentage', data=emotions_df)
-     plt.title('Current Emotional State of the AI')
-     plt.xticks(rotation=45, ha='right')
-     plt.tight_layout()
-     plt.savefig('emotional_state.png')
-     plt.close()

-     return 'emotional_state.png'

- def interactive_interface(input_text):
-     global conversation_history
-     try:
-         evolve_emotions()
-         predicted_emotion = predict_emotion(input_text)
-         sentiment_scores = sentiment_analysis(input_text)
-         entities = extract_entities(input_text)
-         text_complexity = analyze_text_complexity(input_text)
-
-         # Update AI's emotional state based on input
-         update_emotion(predicted_emotion, random.uniform(5, 15), random.uniform(0, 10))
-
-         # Determine AI's current dominant emotion
-         ai_emotion = max(emotions, key=lambda e: emotions[e]['percentage'])
-
-         # Generate response based on AI's emotion
-         response = generate_response(input_text, ai_emotion, conversation_history)
-
-         # Update conversation history
-         conversation_history.append({
-             'user': input_text,
-             'response': response
-         })
-
-         # Trim conversation history if it exceeds the maximum length
-         if len(conversation_history) > max_history_length:
-             conversation_history = conversation_history[-max_history_length:]
-
-         update_emotion_history(ai_emotion, emotions[ai_emotion]['percentage'], emotions[ai_emotion]['intensity'], input_text)
-         feature_transformations()
-
-         emotion_visualization = visualize_emotions()
-
-         analysis_result = {
-             'predicted_user_emotion': predicted_emotion,
-             'sentiment_scores': sentiment_scores,
-             'entities': entities,
-             'text_complexity': text_complexity,
-             'ai_emotion': ai_emotion,
-             'ai_emotion_percentage': emotions[ai_emotion]['percentage'],
-             'ai_emotion_intensity': emotions[ai_emotion]['intensity'],
-             'emotion_visualization': emotion_visualization
-         }
-
-         return analysis_result
-     except Exception as e:
-         print(f"Error: {e}")
-         return {'error': str(e)}
-
- # Define the Gradio interface
- demo = gr.Interface(
      fn=interactive_interface,
-     inputs=gr.Textbox(label="Enter your message"),
      outputs=[
-         gr.Textbox(label="Predicted User Emotion"),
          gr.Textbox(label="Sentiment Scores"),
          gr.Textbox(label="Extracted Entities"),
          gr.Textbox(label="Text Complexity"),
          gr.Textbox(label="AI Emotion"),
          gr.Textbox(label="AI Emotion Percentage"),
          gr.Textbox(label="AI Emotion Intensity"),
-         gr.Image(label="Emotional State Visualization")
      ],
      title="Emotional AI Assistant",
-     description="Interact with an AI assistant that adapts its responses based on its emotional state."
  )

- # Launch the Gradio interface
- demo.launch()
 
@@ -144,34 +144,60 @@ def evolve_emotions():
      intensities = best_individual[len(emotions):]

      for i, (emotion, data) in enumerate(emotions.items()):
+         data['percentage'] = emotion_values[i]
          data['intensity'] = intensities[i]

+     # Normalize percentages
      total = sum(e['percentage'] for e in emotions.values())
      for e in emotions:
          emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100

+ def predict_emotion(context):
+     inputs = emotion_prediction_tokenizer(context, return_tensors="pt", truncation=True, max_length=512)
+     with torch.no_grad():
+         outputs = emotion_prediction_model(**inputs)
+     probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+     predicted_class = torch.argmax(probabilities, dim=-1).item()
+     emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
+     return emotion_labels[predicted_class]

+ def sentiment_analysis(text):
+     sia = SentimentIntensityAnalyzer()
+     sentiment_scores = sia.polarity_scores(text)
+     return sentiment_scores

+ def extract_entities(text):
+     chunked = ne_chunk(pos_tag(word_tokenize(text)))
+     entities = []
+     for chunk in chunked:
+         if hasattr(chunk, 'label'):
+             entities.append(((' '.join(c[0] for c in chunk)), chunk.label()))
+     return entities
+
+ def analyze_text_complexity(text):
+     blob = TextBlob(text)
+     return {
+         'word_count': len(blob.words),
+         'sentence_count': len(blob.sentences),
+         'average_sentence_length': len(blob.words) / len(blob.sentences) if len(blob.sentences) > 0 else 0,
+         'polarity': blob.sentiment.polarity,
+         'subjectivity': blob.sentiment.subjectivity
+     }
+
+ def get_ai_emotion(input_text):
+     predicted_emotion = predict_emotion(input_text)
+     ai_emotion = predicted_emotion
+     ai_emotion_percentage = emotions[predicted_emotion]['percentage']
+     ai_emotion_intensity = emotions[predicted_emotion]['intensity']
+     return ai_emotion, ai_emotion_percentage, ai_emotion_intensity
+
+ def generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity):
+     # Generate an emotion visualization based on the AI's emotional state
+     # This could involve creating an image or a visualization using Matplotlib/Seaborn
+     # The generated image should be saved and returned as the output
+     emotion_visualization_path = 'emotional_state.png'
+     # Generate and save the emotion visualization
+     return emotion_visualization_path

  def generate_response(input_text, ai_emotion, conversation_history):
      # Prepare a prompt based on the current emotion and input
 
@@ -209,118 +235,83 @@ def generate_response(input_text, ai_emotion, conversation_history):
      response = response.split("AI:")[-1].strip()
      return response

+ def interactive_interface(input_text):
+     # Perform your processing logic here
+     predicted_emotion = predict_emotion(input_text)
+     sentiment_scores = sentiment_analysis(input_text)
+     entities = extract_entities(input_text)
+     text_complexity = analyze_text_complexity(input_text)
+     ai_emotion, ai_emotion_percentage, ai_emotion_intensity = get_ai_emotion(input_text)
+     emotion_visualization = generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity)
+     response = generate_response(input_text, ai_emotion, conversation_history)
+
+     # Update conversation history
+     conversation_history.append({'user': input_text, 'response': response})
+     if len(conversation_history) > max_history_length:
+         conversation_history.pop(0)
+
+     # Return the expected outputs in the correct order
+     return (
+         gr.Textbox(predicted_emotion),
+         gr.Textbox(str(sentiment_scores)),
+         gr.Textbox(str(entities)),
+         gr.Textbox(str(text_complexity)),
+         gr.Textbox(ai_emotion),
+         gr.Textbox(str(ai_emotion_percentage)),
+         gr.Textbox(str(ai_emotion_intensity)),
+         gr.Image(emotion_visualization),
+         gr.Textbox(response)
+     )
+
+ # 443 additional features
+ additional_features = {}
+ for i in range(443):
+     additional_features[f'feature_{i+1}'] = 0

+ def feature_transformations():
+     global additional_features
+     for feature in additional_features:
+         additional_features[feature] += random.uniform(-1, 1)

  def visualize_emotions():
      emotions_df = pd.DataFrame([(e, d['percentage'], d['intensity']) for e, d in emotions.items()],
+                                columns=['emotion', 'percentage', 'intensity'])
+
      plt.figure(figsize=(12, 6))
+     sns.barplot(x='emotion', y='percentage', data=emotions_df)
+     plt.title('Emotion Percentages')
+     plt.xlabel('Emotion')
+     plt.ylabel('Percentage')
+     plt.xticks(rotation=90)
+     plt.savefig('emotion_percentages.png')

+     plt.figure(figsize=(12, 6))
+     sns.barplot(x='emotion', y='intensity', data=emotions_df)
+     plt.title('Emotion Intensities')
+     plt.xlabel('Emotion')
+     plt.ylabel('Intensity')
+     plt.xticks(rotation=90)
+     plt.savefig('emotion_intensities.png')

+     return 'emotion_percentages.png', 'emotion_intensities.png'
+
+ # Create the Gradio interface
+ iface = gr.Interface(
      fn=interactive_interface,
+     inputs=gr.Textbox(label="Input Text"),
      outputs=[
+         gr.Textbox(label="Predicted Emotion"),
          gr.Textbox(label="Sentiment Scores"),
          gr.Textbox(label="Extracted Entities"),
          gr.Textbox(label="Text Complexity"),
          gr.Textbox(label="AI Emotion"),
          gr.Textbox(label="AI Emotion Percentage"),
          gr.Textbox(label="AI Emotion Intensity"),
+         gr.Image(label="Emotion Visualization"),
+         gr.Textbox(label="AI Response")
      ],
      title="Emotional AI Assistant",
+     description="An AI assistant that can analyze the emotional content of text and generate responses based on its emotional state.",
  )

+ iface.launch()
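
Note: as committed, generate_emotion_visualization is a stub. It returns the hard-coded path 'emotional_state.png' but never writes an image, so the gr.Image output only has something to show if that file was created elsewhere. Below is a minimal sketch of one way the stub could be filled in with Matplotlib, which app.py already uses. The single-bar chart, the Agg backend call, and the alpha-from-intensity mapping are illustrative assumptions, not part of this commit.

import matplotlib
matplotlib.use('Agg')  # assumption: render off-screen, since the Space has no display
import matplotlib.pyplot as plt

def generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity):
    # Draw one bar for the dominant emotion: height is the percentage,
    # transparency scales with the 0-10 intensity value (illustrative choice).
    emotion_visualization_path = 'emotional_state.png'
    fig, ax = plt.subplots(figsize=(6, 4))
    alpha = min(max(ai_emotion_intensity / 10.0, 0.1), 1.0)
    ax.bar([ai_emotion], [ai_emotion_percentage], color='tab:blue', alpha=alpha)
    ax.set_ylim(0, 100)
    ax.set_ylabel('Percentage')
    ax.set_title(f'Dominant emotion: {ai_emotion}')
    fig.tight_layout()
    fig.savefig(emotion_visualization_path)
    plt.close(fig)
    return emotion_visualization_path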