Sephfox committed
Commit ea92796 · verified · 1 parent: c852ed8

Update app.py

Files changed (1):
  1. app.py  +31 -22
app.py CHANGED
@@ -137,7 +137,7 @@ def evolve_emotions():
 
     population = toolbox.population(n=100)
     algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=50,
-                              stats=None, halloffame=None, verbose=False)
+                              stats=None, halloffame=None, verbose=False)
 
     best_individual = tools.selBest(population, k=1)[0]
     emotion_values = best_individual[:len(emotions)]
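
The eaMuPlusLambda call depends on a DEAP toolbox registered earlier in app.py, which this hunk does not show. A minimal, self-contained sketch of the kind of setup that call assumes (the attribute, evaluate, mate, mutate and select registrations below are illustrative placeholders, not taken from this commit):

import random
from deap import algorithms, base, creator, tools

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=10)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", lambda ind: (sum(ind),))          # placeholder fitness
toolbox.register("mate", tools.cxBlend, alpha=0.5)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=0.2, indpb=0.1)
toolbox.register("select", tools.selTournament, tournsize=3)

population = toolbox.population(n=100)
algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2,
                          ngen=50, stats=None, halloffame=None, verbose=False)
best_individual = tools.selBest(population, k=1)[0]
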
@@ -146,8 +146,7 @@ def evolve_emotions():
     for i, (emotion, data) in enumerate(emotions.items()):
         data['percentage'] = emotion_values[i]
         data['intensity'] = intensities[i]
-
-    # Normalize percentages
+    # Normalize percentages
    total = sum(e['percentage'] for e in emotions.values())
    for e in emotions:
        emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
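
The normalization step kept here simply rescales the evolved values so the percentages sum to 100. A tiny stand-alone illustration (the emotion names and raw values are made up):

emotions = {'joy':  {'percentage': 2.0, 'intensity': 0.5},
            'fear': {'percentage': 6.0, 'intensity': 0.8}}

total = sum(e['percentage'] for e in emotions.values())
for e in emotions:
    emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
# joy -> 25.0, fear -> 75.0
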
@@ -196,16 +195,27 @@ def generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion
     # This could involve creating an image or a visualization using Matplotlib/Seaborn
     # The generated image should be saved and returned as the output
     emotion_visualization_path = 'emotional_state.png'
-    # Generate and save the emotion visualization
+    try:
+        # Generate and save the emotion visualization
+        plt.figure(figsize=(6, 6))
+        sns.barplot(x=['Emotion'], y=[ai_emotion_percentage], color=sns.color_palette()[emotions.index(ai_emotion)])
+        plt.title(f'Current Emotional State: {ai_emotion.capitalize()}')
+        plt.xlabel('Emotion')
+        plt.ylabel('Percentage')
+        plt.savefig(emotion_visualization_path)
+        plt.close()
+    except Exception as e:
+        print(f"Error generating emotion visualization: {e}")
+        emotion_visualization_path = None
     return emotion_visualization_path
 
-def generate_response(input_text, ai_emotion, conversation_history):
-    # Prepare a prompt based on the current emotion and input
-    prompt = f"As an AI assistant, I am currently feeling {ai_emotion}. My response will reflect this emotional state. Human: {input_text}\nAI:"
+def generate_response(ai_emotion, conversation_history):
+    # Prepare a prompt based on the current emotion
+    prompt = f"As an AI assistant, I am currently feeling {ai_emotion}. My response will reflect this emotional state."
 
     # Add conversation history to the prompt
     for entry in conversation_history[-100:]:  # Use last 100 entries for context
-        prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
+        prompt = f"My previous response: {entry['response']}\n" + prompt
 
     inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=8192)
 
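
One thing to check in the new visualization block: if emotions here is the same dict that evolve_emotions() iterates over, emotions.index(ai_emotion) raises AttributeError (dicts have no index method), so the except branch would fire and the function would always return None. A hedged sketch of a color lookup that works with a dict of that shape (same structure assumed, not confirmed by this diff):

import matplotlib.pyplot as plt
import seaborn as sns

def generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity):
    emotion_visualization_path = 'emotional_state.png'
    try:
        emotion_names = list(emotions.keys())  # assumes 'emotions' is the module-level dict
        palette = sns.color_palette(n_colors=len(emotion_names))
        plt.figure(figsize=(6, 6))
        sns.barplot(x=[ai_emotion], y=[ai_emotion_percentage], color=palette[emotion_names.index(ai_emotion)])
        plt.title(f'Current Emotional State: {ai_emotion.capitalize()}')
        plt.ylabel('Percentage')
        plt.savefig(emotion_visualization_path)
        plt.close()
    except Exception as e:
        print(f"Error generating emotion visualization: {e}")
        emotion_visualization_path = None
    return emotion_visualization_path
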
 
@@ -232,8 +242,7 @@ def generate_response(input_text, ai_emotion, conversation_history):
     response = response_tokenizer.decode(response_ids[0], skip_special_tokens=True)
 
     # Extract only the AI's response
-    response = response.split("AI:")[-1].strip()
-    return response
+    return response.strip()
 
 def interactive_interface(input_text):
     # Perform your processing logic here
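
This hunk decodes response_ids, which comes from a generate() call outside the shown context. A minimal sketch of that missing middle step, assuming response_model and response_tokenizer are a Hugging Face causal-LM pair loaded elsewhere in app.py (names and generation parameters below are illustrative, not confirmed by the diff):

import torch

inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=8192)
with torch.no_grad():
    response_ids = response_model.generate(   # response_model is an assumed name
        **inputs,
        max_new_tokens=256,                   # illustrative value
        do_sample=True,
        temperature=0.7,
        pad_token_id=response_tokenizer.eos_token_id,
    )
response = response_tokenizer.decode(response_ids[0], skip_special_tokens=True)
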
@@ -243,24 +252,24 @@ def interactive_interface(input_text):
     text_complexity = analyze_text_complexity(input_text)
     ai_emotion, ai_emotion_percentage, ai_emotion_intensity = get_ai_emotion(input_text)
     emotion_visualization = generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity)
-    response = generate_response(input_text, ai_emotion, conversation_history)
+    response = generate_response(ai_emotion, conversation_history)
 
     # Update conversation history
-    conversation_history.append({'user': input_text, 'response': response})
+    conversation_history.append({'response': response})
     if len(conversation_history) > max_history_length:
         conversation_history.pop(0)
 
     # Return the expected outputs in the correct order
     return (
-        gr.Textbox(predicted_emotion),
-        gr.Textbox(str(sentiment_scores)),
-        gr.Textbox(str(entities)),
-        gr.Textbox(str(text_complexity)),
-        gr.Textbox(ai_emotion),
-        gr.Textbox(str(ai_emotion_percentage)),
-        gr.Textbox(str(ai_emotion_intensity)),
-        gr.Image(emotion_visualization),
-        gr.Textbox(response)
+        gr.Textbox(value=predicted_emotion, label="Predicted Emotion"),
+        gr.Textbox(value=str(sentiment_scores), label="Sentiment Scores"),
+        gr.Textbox(value=str(entities), label="Extracted Entities"),
+        gr.Textbox(value=str(text_complexity), label="Text Complexity"),
+        gr.Textbox(value=ai_emotion, label="AI Emotion"),
+        gr.Textbox(value=str(ai_emotion_percentage), label="AI Emotion Percentage"),
+        gr.Textbox(value=str(ai_emotion_intensity), label="AI Emotion Intensity"),
+        gr.Image(value=emotion_visualization, label="Emotion Visualization"),
+        gr.Textbox(value=response, label="AI Response")
     )
 
 # 443 additional features
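
The history update caps the list at max_history_length by popping from the front. A collections.deque with maxlen gives the same bound without the explicit check (sketch only; app.py defines its own max_history_length):

from collections import deque

max_history_length = 100                          # assumed value for the sketch
conversation_history = deque(maxlen=max_history_length)

conversation_history.append({'response': "Hello!"})
# once full, each append silently drops the oldest entry, matching the pop(0) behaviour
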
@@ -314,4 +323,4 @@ iface = gr.Interface(
     description="An AI assistant that can analyze the emotional content of text and generate responses based on its emotional state.",
 )
 
-iface.launch()
+iface.launch()
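
For the nine values returned by interactive_interface() to land in labelled components, gr.Interface needs a matching outputs list. A sketch of the wiring this diff implies (the input component and title are assumptions; only the description string appears in the diff):

import gradio as gr

iface = gr.Interface(
    fn=interactive_interface,
    inputs=gr.Textbox(label="Your message"),      # assumed input component
    outputs=[
        gr.Textbox(label="Predicted Emotion"),
        gr.Textbox(label="Sentiment Scores"),
        gr.Textbox(label="Extracted Entities"),
        gr.Textbox(label="Text Complexity"),
        gr.Textbox(label="AI Emotion"),
        gr.Textbox(label="AI Emotion Percentage"),
        gr.Textbox(label="AI Emotion Intensity"),
        gr.Image(label="Emotion Visualization"),
        gr.Textbox(label="AI Response"),
    ],
    title="Emotion-Aware AI Assistant",            # assumed title
    description="An AI assistant that can analyze the emotional content of text and generate responses based on its emotional state.",
)

iface.launch()
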
 