Sephfox committed · verified · Commit c0ba949 · 1 Parent(s): 3e002ee

Update app.py

Files changed (1): app.py (+76, −65)
app.py CHANGED
@@ -6,7 +6,6 @@ import json
 import random
 import gradio as gr
 import torch
-from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import OneHotEncoder
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, pipeline
 from deap import base, creator, tools, algorithms
@@ -50,6 +49,8 @@ try:
     encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True)
 except TypeError:
     encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)
+contexts_encoded = encoder.fit_transform(df[['context']])
+
 # Encoding emotions
 emotions_target = pd.Categorical(df['emotion']).codes
 emotion_classes = pd.Categorical(df['emotion']).categories
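For reference, the pattern this hunk completes: the encoder is created with whichever sparse flag the installed scikit-learn accepts, then fit on the context column. A minimal standalone sketch with a toy frame (the column values are invented, not from the app's dataset):

    import pandas as pd
    from sklearn.preprocessing import OneHotEncoder

    # Toy stand-in for df[['context']]
    df = pd.DataFrame({'context': ['greeting', 'complaint', 'greeting']})

    # scikit-learn >= 1.2 uses sparse_output=; older releases use sparse=
    try:
        encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True)
    except TypeError:
        encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)

    contexts_encoded = encoder.fit_transform(df[['context']])
    print(contexts_encoded.shape)  # (3, 2): two distinct context values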
@@ -66,32 +67,18 @@ response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
 # Enhanced Emotional States
 emotions = {
     'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-    'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
     'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-    'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
     'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
-    'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
-    'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-    'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-    'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-    'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-    'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
     'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
+    'love': {'percentage': 10, 'motivation': 'affectionate', 'intensity': 0},
     'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
-    'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
-    'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
-    'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
-    'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
-    'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
-    'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
-    'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
-    'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
-    'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
-    'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
+    'neutral': {'percentage': 40, 'motivation': 'balanced', 'intensity': 0},
 }
 
-total_percentage = 200
+total_percentage = 100
 emotion_history_file = 'emotion_history.json'
+conversation_history = []
+max_history_length = 30
 
 def load_historical_data(file_path=emotion_history_file):
     if os.path.exists(file_path):
@@ -106,22 +93,13 @@ def save_historical_data(historical_data, file_path=emotion_history_file):
 emotion_history = load_historical_data()
 
 def update_emotion(emotion, percentage, intensity):
-    if percentage > emotions['ideal_state']['percentage']:
-        percentage = emotions['ideal_state']['percentage']
-
-    emotions['ideal_state']['percentage'] -= percentage
     emotions[emotion]['percentage'] += percentage
     emotions[emotion]['intensity'] = intensity
 
-    # Introduce some randomness in emotional evolution
+    # Normalize percentages
+    total = sum(e['percentage'] for e in emotions.values())
     for e in emotions:
-        if e != emotion and e != 'ideal_state':
-            change = random.uniform(-2, 2)
-            emotions[e]['percentage'] = max(0, emotions[e]['percentage'] + change)
-
-    total_current = sum(e['percentage'] for e in emotions.values())
-    adjustment = total_percentage - total_current
-    emotions['ideal_state']['percentage'] += adjustment
+        emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
 
 def normalize_context(context):
     return context.lower().strip()
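The reworked update_emotion drops the old 'ideal_state' bookkeeping: after one emotion is bumped, every percentage is rescaled so the values sum to 100 again. A standalone sketch of that renormalization with toy names and values (not the app's real state):

    # Toy emotional state; the real app uses the module-level `emotions` dict
    emotions = {
        'joy': {'percentage': 25.0, 'intensity': 0},
        'sadness': {'percentage': 25.0, 'intensity': 0},
        'neutral': {'percentage': 50.0, 'intensity': 0},
    }

    def update_emotion(emotion, percentage, intensity):
        emotions[emotion]['percentage'] += percentage
        emotions[emotion]['intensity'] = intensity
        # Rescale so the percentages sum to 100 again
        total = sum(e['percentage'] for e in emotions.values())
        for e in emotions:
            emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100

    update_emotion('joy', 10, 5)
    print({k: round(v['percentage'], 1) for k, v in emotions.items()})
    # {'joy': 31.8, 'sadness': 22.7, 'neutral': 45.5}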
@@ -131,24 +109,22 @@ creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
 creator.create("Individual", list, fitness=creator.FitnessMulti)
 
 def evaluate(individual):
-    emotion_values = individual[:len(emotions) - 1]
-    intensities = individual[len(emotions) - 1:-1]
-    ideal_state = individual[-1]
+    emotion_values = individual[:len(emotions)]
+    intensities = individual[len(emotions):]
 
-    ideal_diff = abs(100 - ideal_state)
-    sum_non_ideal = sum(emotion_values)
+    total_diff = abs(100 - sum(emotion_values))
     intensity_range = max(intensities) - min(intensities)
+    emotion_balance = max(emotion_values) - min(emotion_values)
 
-    return ideal_diff, sum_non_ideal, intensity_range
+    return total_diff, intensity_range, emotion_balance
 
 def evolve_emotions():
     toolbox = base.Toolbox()
-    toolbox.register("attr_float", random.uniform, 0, 20)
+    toolbox.register("attr_float", random.uniform, 0, 100)
     toolbox.register("attr_intensity", random.uniform, 0, 10)
     toolbox.register("individual", tools.initCycle, creator.Individual,
-                     (toolbox.attr_float,) * (len(emotions) - 1) +
-                     (toolbox.attr_intensity,) * (len(emotions) - 1) +
-                     (lambda: 100,), n=1)
+                     (toolbox.attr_float,) * len(emotions) +
+                     (toolbox.attr_intensity,) * len(emotions), n=1)
     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
     toolbox.register("mate", tools.cxTwoPoint)
     toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
@@ -156,21 +132,21 @@ def evolve_emotions():
     toolbox.register("evaluate", evaluate)
 
     population = toolbox.population(n=100)
-    algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
+    algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=50,
                               stats=None, halloffame=None, verbose=False)
 
     best_individual = tools.selBest(population, k=1)[0]
-    emotion_values = best_individual[:len(emotions) - 1]
-    intensities = best_individual[len(emotions) - 1:-1]
-    ideal_state = best_individual[-1]
+    emotion_values = best_individual[:len(emotions)]
+    intensities = best_individual[len(emotions):]
 
-    for i, (emotion, data) in enumerate(list(emotions.items())[:-1]):  # Exclude 'ideal_state'
-        if i < len(emotion_values):
-            data['percentage'] = emotion_values[i]
-        if i < len(intensities):
-            data['intensity'] = intensities[i]
+    for i, (emotion, data) in enumerate(emotions.items()):
+        data['percentage'] = emotion_values[i]
+        data['intensity'] = intensities[i]
 
-    emotions['ideal_state']['percentage'] = ideal_state
+    # Normalize percentages
+    total = sum(e['percentage'] for e in emotions.values())
+    for e in emotions:
+        emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
 
 def update_emotion_history(emotion, percentage, intensity, context):
     entry = {
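In the revised evolution code each DEAP individual is a flat list: the first len(emotions) genes are percentages, the next len(emotions) are intensities, and evaluate slices it in two. A hand-built example of that layout and the three objectives (values are arbitrary; seven emotions matches the new dict):

    # Individual layout: [p_1 .. p_n, i_1 .. i_n] for n emotions
    n_emotions = 7
    individual = [10, 10, 10, 10, 10, 10, 40] + [1, 2, 3, 4, 5, 6, 7]

    emotion_values = individual[:n_emotions]
    intensities = individual[n_emotions:]

    total_diff = abs(100 - sum(emotion_values))            # distance from summing to 100
    intensity_range = max(intensities) - min(intensities)  # spread of intensities
    emotion_balance = max(emotion_values) - min(emotion_values)  # spread of percentages

    print(total_diff, intensity_range, emotion_balance)  # 0 6 30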
@@ -193,19 +169,37 @@ def feature_transformations():
     for feature in additional_features:
         additional_features[feature] += random.uniform(-1, 1)
 
-def generate_response(input_text):
-    inputs = response_tokenizer(input_text, return_tensors="pt", truncation=True, max_length=512)
+def generate_response(input_text, ai_emotion):
+    # Prepare a prompt based on the current emotion and input
+    prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
+
+    # Add conversation history to the prompt
+    for entry in conversation_history[-5:]:  # Use last 5 entries for context
+        prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
+
+    inputs = response_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
+
+    # Adjust generation parameters based on emotion
+    temperature = 0.7
+    if ai_emotion == 'anger':
+        temperature = 0.9  # More randomness for angry responses
+    elif ai_emotion == 'joy':
+        temperature = 0.5  # More focused responses for joyful state
+
     with torch.no_grad():
         response_ids = response_model.generate(
             inputs.input_ids,
-            max_length=150,
+            max_length=1024,
             num_return_sequences=1,
             no_repeat_ngram_size=2,
             top_k=50,
             top_p=0.95,
-            temperature=0.7
+            temperature=temperature
         )
     response = response_tokenizer.decode(response_ids[0], skip_special_tokens=True)
+
+    # Extract only the AI's response
+    response = response.split("AI:")[-1].strip()
     return response
 
 def predict_emotion(context):
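The new generate_response first assembles a prompt (recent conversation turns prepended in front of an emotion-tagged instruction) and then picks a sampling temperature from the AI's current emotion before calling the model. A sketch of just that prompt/temperature logic, with made-up history entries and no model call:

    # Made-up history standing in for the module-level conversation_history
    conversation_history = [
        {'user': 'Hi there', 'response': 'Hello! How can I help?'},
        {'user': 'Tell me a joke', 'response': 'Why did the tensor cross the graph?'},
    ]

    def build_prompt(input_text, ai_emotion):
        prompt = (f"You are an AI assistant currently feeling {ai_emotion}. "
                  f"Your response should reflect this emotion. Human: {input_text}\nAI:")
        # Each stored turn is prepended in front of the instruction,
        # so the most recently processed (newest) turn ends up first.
        for entry in conversation_history[-5:]:
            prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
        return prompt

    def pick_temperature(ai_emotion):
        if ai_emotion == 'anger':
            return 0.9  # more randomness for angry responses
        if ai_emotion == 'joy':
            return 0.5  # more focused responses when joyful
        return 0.7      # default for all other emotions

    print(pick_temperature('anger'))                       # 0.9
    print(build_prompt('How are you?', 'joy').splitlines()[0])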
@@ -262,16 +256,33 @@ def interactive_interface(input_text):
     entities = extract_entities(input_text)
     text_complexity = analyze_text_complexity(input_text)
 
+    # Update AI's emotional state based on input
     update_emotion(predicted_emotion, random.uniform(5, 15), random.uniform(0, 10))
-    update_emotion_history(predicted_emotion, emotions[predicted_emotion]['percentage'], emotions[predicted_emotion]['intensity'], input_text)
-    feature_transformations()
 
-    response = generate_response(input_text)
+    # Determine AI's current dominant emotion
+    ai_emotion = max(emotions, key=lambda e: emotions[e]['percentage'])
+
+    # Generate response based on AI's emotion
+    response = generate_response(input_text, ai_emotion)
+
+    # Update conversation history
+    conversation_history.append({
+        'user': input_text,
+        'response': response
+    })
+
+    # Trim conversation history if it exceeds the maximum length
+    if len(conversation_history) > max_history_length:
+        conversation_history = conversation_history[-max_history_length:]
+
+    update_emotion_history(ai_emotion, emotions[ai_emotion]['percentage'], emotions[ai_emotion]['intensity'], input_text)
+    feature_transformations()
 
     emotion_visualization = visualize_emotions()
 
     analysis_result = {
-        'predicted_emotion': predicted_emotion,
+        'predicted_user_emotion': predicted_emotion,
+        'ai_emotion': ai_emotion,
         'sentiment_scores': sentiment_scores,
         'entities': entities,
         'text_complexity': text_complexity,
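The interface now derives the AI's own emotion directly from the state dict, taking the key with the highest percentage. A tiny illustration with toy values:

    emotions = {'joy': {'percentage': 20}, 'anger': {'percentage': 55}, 'neutral': {'percentage': 25}}
    ai_emotion = max(emotions, key=lambda e: emotions[e]['percentage'])
    print(ai_emotion)  # anger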
@@ -291,11 +302,12 @@ def gradio_interface(input_text):
         return response, None
     else:
         return (
-            f"Predicted Emotion: {response['predicted_emotion']}\n"
+            f"User Emotion: {response['predicted_user_emotion']}\n"
+            f"AI Emotion: {response['ai_emotion']}\n"
+            f"AI Response: {response['response']}\n\n"
             f"Sentiment: {response['sentiment_scores']}\n"
             f"Entities: {response['entities']}\n"
-            f"Text Complexity: {response['text_complexity']}\n"
-            f"Response: {response['response']}\n",
+            f"Text Complexity: {response['text_complexity']}\n",
             response['emotion_visualization']
         )
@@ -309,5 +321,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    iface.launch()
-
+    iface.launch()
 