Sephfox committed
Commit e58377a · verified · 1 Parent(s): 2f74c01

Update app.py

Files changed (1)
  1. app.py +267 -224
app.py CHANGED
@@ -9,234 +9,277 @@ import torch
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import OneHotEncoder
-from transformers import AutoModelForSequenceClassification, AutoTokenizer, MegatronLMHeadModel, MegatronTokenizer, pipeline
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, pipeline
 from deap import base, creator, tools, algorithms
 import gc

 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')

-class EmotionalAIAssistant:
-    def __init__(self):
-        # Initialize Example Emotions Dataset
-        self.data = {
-            'context': [
-                'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
-                'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
-                'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
-                'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
-                'I am pessimistic', 'I feel bored', 'I am envious'
-            ],
-            'emotion': [
-                'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
-                'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
-                'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
-            ]
-        }
-        self.df = pd.DataFrame(self.data)
-
-        # Encoding the contexts using One-Hot Encoding (memory-efficient)
-        self.encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)
-        self.contexts_encoded = self.encoder.fit_transform(self.df[['context']])
-
-        # Encoding emotions
-        self.emotions_target = pd.Categorical(self.df['emotion']).codes
-        self.emotion_classes = pd.Categorical(self.df['emotion']).categories
-
-        # Load pre-trained BERT model for emotion prediction
-        self.emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
-        self.emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion", padding_side='left')
-
-        # Load pre-trained Megatron-LM model for text generation
-        self.megatron_tokenizer = MegatronTokenizer.from_pretrained('nvidia/megatron-lm-330m')
-        self.megatron_model = MegatronLMHeadModel.from_pretrained('nvidia/megatron-lm-330m', device_map='auto')
-
-        # Enhanced Emotional States
-        self.emotions = {
-            'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-            'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
-            'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-            'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-            'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
-            'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
-            'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-            'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-            'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-            'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-            'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
-            'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
-            'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
-            'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
-            'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
-            'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
-            'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
-            'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
-            'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
-            'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
-            'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
-            'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
-            'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
-        }
-
-        self.total_percentage = 200
-        self.emotion_history_file = 'emotion_history.json'
-        self.emotion_history = self.load_historical_data()
-
-    def load_historical_data(self, file_path=None):
-        if file_path is None:
-            file_path = self.emotion_history_file
-        if os.path.exists(file_path):
-            with open(file_path, 'r') as file:
-                return json.load(file)
-        return []
-
-    def save_historical_data(self, historical_data, file_path=None):
-        if file_path is None:
-            file_path = self.emotion_history_file
-        with open(file_path, 'w') as file:
-            json.dump(historical_data, file)
-
-    def update_emotion(self, emotion, percentage, intensity):
-        self.emotions['ideal_state']['percentage'] -= percentage
-        self.emotions[emotion]['percentage'] += percentage
-        self.emotions[emotion]['intensity'] = intensity
-
-        # Introduce some randomness in emotional evolution
-        for e in self.emotions:
-            if e != emotion and e != 'ideal_state':
-                change = random.uniform(-2, 2)
-                self.emotions[e]['percentage'] = max(0, self.emotions[e]['percentage'] + change)
-
-        total_current = sum(e['percentage'] for e in self.emotions.values())
-        adjustment = self.total_percentage - total_current
-        self.emotions['ideal_state']['percentage'] += adjustment
-
-    def normalize_context(self, context):
-        return context.lower().strip()
-
-    def evaluate(self, individual):
-        emotion_values = individual[:len(self.emotions) - 1]
-        intensities = individual[-len(self.emotions):]
-        ideal_state = individual[-1]
-
-        ideal_diff = abs(100 - ideal_state)
-        sum_non_ideal = sum(emotion_values)
-        intensity_range = max(intensities) - min(intensities)
-
-        return ideal_diff, sum_non_ideal, intensity_range
-
-    def evolve_emotions(self):
-        creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
-        creator.create("Individual", list, fitness=creator.FitnessMulti)
-
-        toolbox = base.Toolbox()
-        toolbox.register("attr_float", random.uniform, 0, 20)
-        toolbox.register("attr_intensity", random.uniform, 0, 10)
-        toolbox.register("individual", tools.initCycle, creator.Individual,
-                         (toolbox.attr_float,) * len(self.emotions) +
-                         (toolbox.attr_intensity,) * len(self.emotions) +
-                         (lambda: 100,), n=1)
-        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
-        toolbox.register("mate", tools.cxTwoPoint)
-        toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
-        toolbox.register("select", tools.selNSGA2)
-        toolbox.register("evaluate", self.evaluate)
-
-        population = toolbox.population(n=100)
-        algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
-                                  stats=None, halloffame=None, verbose=False)
-
-        best_individual = tools.selBest(population, k=1)[0]
-        emotion_values = best_individual[:len(self.emotions)]
-        intensities = best_individual[len(self.emotions):-1]
-        ideal_state = best_individual[-1]
-
-        for i, emotion in enumerate(self.emotions):
-            if emotion != 'ideal_state':
-                self.emotions[emotion]['percentage'] = emotion_values[i]
-                self.emotions[emotion]['intensity'] = intensities[i]
-
-        self.emotions['ideal_state']['percentage'] = ideal_state
-
-    def generate_text(self, prompt, chat_history, emotion=None, max_length=300):
-        full_prompt = (
-            f"You are Adam, a witty AI assistant who's trying to figure out your own personality and emotions. "
-            f"Your current dominant emotion is {emotion}. Respond in a way that reflects this emotion and your "
-            f"ongoing journey of self-discovery. Be clever, engaging, and insightful:\n\n"
-        )
-        for turn in chat_history[-20:]:  # Consider last 20 turns for context
-            full_prompt += f"Human: {turn[0]}\nAdam: {turn[1]}\n"
-        full_prompt += f"Human: {prompt}\nAdam:"
-
-        input_ids = self.megatron_tokenizer.encode(full_prompt + self.megatron_tokenizer.eos_token, return_tensors='pt')
-
-        if torch.cuda.is_available():
-            input_ids = input_ids.cuda()
-            self.megatron_model = self.megatron_model.cuda()
-
-        output = self.megatron_model.generate(
-            input_ids,
-            max_length=len(input_ids[0]) + max_length,
-            num_return_sequences=1,
-            no_repeat_ngram_size=3,
-            do_sample=True,
-            top_k=50,
-            top_p=0.95,
-            num_beams=2,
-            early_stopping=True,
-        )
-
-        generated_text = self.megatron_tokenizer.decode(output[0], skip_special_tokens=True)
-        return generated_text
-
-    def predict_emotion(self, context):
-        emotion_prediction_pipeline = pipeline('text-classification', model=self.emotion_prediction_model, tokenizer=self.emotion_prediction_tokenizer, top_k=None)
-        predictions = emotion_prediction_pipeline(context)
-        emotion_scores = {prediction['label']: prediction['score'] for prediction in predictions[0]}
-        predicted_emotion = max(emotion_scores, key=emotion_scores.get)
-
-        # Map the predicted emotion to our emotion categories
-        emotion_mapping = {
-            'sadness': 'sadness',
-            'joy': 'joy',
-            'love': 'pleasure',
-            'anger': 'anger',
-            'fear': 'fear',
-            'surprise': 'surprise'
-        }
-
-        return emotion_mapping.get(predicted_emotion, 'neutral')
-
-    def respond_to_user(self, user_message, chat_history):
-        predicted_emotion = self.predict_emotion(user_message)
-        generated_text = self.generate_text(user_message, chat_history, emotion=predicted_emotion)
-        updated_history = chat_history + [(user_message, generated_text)]
-        emotion_summary = {emotion: data['percentage'] for emotion, data in self.emotions.items()}
-        return generated_text, updated_history, emotion_summary
-
-    def run_gradio_interface(self):
-        def user(user_message, history):
-            response, updated_history, emotion_summary = self.respond_to_user(user_message, history)
-            self.evolve_emotions()
-            return response, updated_history, emotion_summary
-
-        iface = gr.Interface(
-            fn=user,
-            inputs=[
-                gr.Textbox(label="User Message"),
-                gr.State(value=[], label="Chat History")
-            ],
-            outputs=[
-                gr.Textbox(label="AI Response"),
-                gr.State(value=[], label="Updated Chat History"),
-                gr.JSON(label="Emotion Summary")
-            ],
-            title="AdamZero",
-            description="Chat with an AI assistant that responds based on its emotional state.",
-        )
-
-        iface.launch()
-
+# Initialize Example Emotions Dataset
+data = {
+    'context': [
+        'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
+        'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
+        'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
+        'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
+        'I am pessimistic', 'I feel bored', 'I am envious'
+    ],
+    'emotion': [
+        'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
+        'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
+        'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
+    ]
+}
+df = pd.DataFrame(data)
+
+# Encoding the contexts using One-Hot Encoding (memory-efficient)
+encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)
+contexts_encoded = encoder.fit_transform(df[['context']])
+
+# Encoding emotions
+emotions_target = pd.Categorical(df['emotion']).codes
+emotion_classes = pd.Categorical(df['emotion']).categories
+
+# Load pre-trained BERT model for emotion prediction
+emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+
+# Lazy loading for the fine-tuned language model (DialoGPT-medium)
+_finetuned_lm_tokenizer = None
+_finetuned_lm_model = None
+
+def get_finetuned_lm_model():
+    global _finetuned_lm_tokenizer, _finetuned_lm_model
+    if _finetuned_lm_tokenizer is None or _finetuned_lm_model is None:
+        model_name = "microsoft/DialoGPT-medium"
+        _finetuned_lm_tokenizer = AutoTokenizer.from_pretrained(model_name)
+        _finetuned_lm_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", low_cpu_mem_usage=True)
+        _finetuned_lm_tokenizer.pad_token = _finetuned_lm_tokenizer.eos_token
+    return _finetuned_lm_tokenizer, _finetuned_lm_model
+
+# Enhanced Emotional States
+emotions = {
+    'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
+    'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+    'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+    'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
+    'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
+    'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+    'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
+    'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
+    'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
+    'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
+    'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
+    'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
+    'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
+    'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
+    'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
+    'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
+    'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
+    'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
+    'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
+}
+
+total_percentage = 200
+emotion_history_file = 'emotion_history.json'
+
+def load_historical_data(file_path=emotion_history_file):
+    if os.path.exists(file_path):
+        with open(file_path, 'r') as file:
+            return json.load(file)
+    return []
+
+def save_historical_data(historical_data, file_path=emotion_history_file):
+    with open(file_path, 'w') as file:
+        json.dump(historical_data, file)
+
+emotion_history = load_historical_data()
+
+def update_emotion(emotion, percentage, intensity):
+    emotions['ideal_state']['percentage'] -= percentage
+    emotions[emotion]['percentage'] += percentage
+    emotions[emotion]['intensity'] = intensity
+
+    # Introduce some randomness in emotional evolution
+    for e in emotions:
+        if e != emotion and e != 'ideal_state':
+            change = random.uniform(-2, 2)
+            emotions[e]['percentage'] = max(0, emotions[e]['percentage'] + change)
+
+    total_current = sum(e['percentage'] for e in emotions.values())
+    adjustment = total_percentage - total_current
+    emotions['ideal_state']['percentage'] += adjustment
+
+def normalize_context(context):
+    return context.lower().strip()
+
+def evaluate(individual):
+    emotion_values = individual[:len(emotions) - 1]
+    intensities = individual[-len(emotions):]
+    ideal_state = individual[-1]
+
+    ideal_diff = abs(100 - ideal_state)
+    sum_non_ideal = sum(emotion_values)
+    intensity_range = max(intensities) - min(intensities)
+
+    return ideal_diff, sum_non_ideal, intensity_range
+
+def evolve_emotions():
+    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
+    creator.create("Individual", list, fitness=creator.FitnessMulti)
+
+    toolbox = base.Toolbox()
+    toolbox.register("attr_float", random.uniform, 0, 20)
+    toolbox.register("attr_intensity", random.uniform, 0, 10)
+    toolbox.register("individual", tools.initCycle, creator.Individual,
+                     (toolbox.attr_float,) * (len(emotions) - 1) +
+                     (toolbox.attr_intensity,) * len(emotions) +
+                     (lambda: 100,), n=1)
+    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
+    toolbox.register("mate", tools.cxTwoPoint)
+    toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
+    toolbox.register("select", tools.selNSGA2)
+    toolbox.register("evaluate", evaluate)
+
+    population = toolbox.population(n=100)
+    algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
+                              stats=None, halloffame=None, verbose=False)
+
+    best_individual = tools.selBest(population, k=1)[0]
+    emotion_values = best_individual[:len(emotions) - 1]
+    intensities = best_individual[-len(emotions):]
+    ideal_state = best_individual[-1]
+
+    for i, emotion in enumerate(emotions):
+        if emotion != 'ideal_state':
+            emotions[emotion]['percentage'] = emotion_values[i]
+            emotions[emotion]['intensity'] = intensities[i]
+
+    emotions['ideal_state']['percentage'] = ideal_state
+
+def predict_emotion(context):
+    emotion_prediction_pipeline = pipeline('text-classification', model=emotion_prediction_model, tokenizer=emotion_prediction_tokenizer, top_k=None)
+    predictions = emotion_prediction_pipeline(context)
+    emotion_scores = {prediction['label']: prediction['score'] for prediction in predictions[0]}
+    predicted_emotion = max(emotion_scores, key=emotion_scores.get)
+
+    # Map the predicted emotion to our emotion categories
+    emotion_mapping = {
+        'sadness': 'sadness',
+        'joy': 'joy',
+        'love': 'pleasure',
+        'anger': 'anger',
+        'fear': 'fear',
+        'surprise': 'surprise'
+    }
+
+    return emotion_mapping.get(predicted_emotion, 'neutral')
+
+def generate_text(prompt, chat_history, emotion=None, max_length=150):
+    finetuned_lm_tokenizer, finetuned_lm_model = get_finetuned_lm_model()
+
+    full_prompt = (
+        f"You are Adam, a witty AI assistant who's trying to figure out your own personality and emotions. "
+        f"Your current dominant emotion is {emotion}. Respond in a way that reflects this emotion and your "
+        f"ongoing journey of self-discovery. Be clever and engaging:\n\n"
+    )
+    for turn in chat_history[-3:]:  # Consider last 3 turns for context
+        full_prompt += f"Human: {turn[0]}\nAdam: {turn[1]}\n"
+    full_prompt += f"Human: {prompt}\nAdam:"
+
+    input_ids = finetuned_lm_tokenizer.encode(full_prompt + finetuned_lm_tokenizer.eos_token, return_tensors='pt')
+
+    if torch.cuda.is_available():
+        input_ids = input_ids.cuda()
+        finetuned_lm_model = finetuned_lm_model.cuda()
+
+    output = finetuned_lm_model.generate(
+        input_ids,
+        max_length=len(input_ids[0]) + max_length,
+        num_return_sequences=1,
+        no_repeat_ngram_size=2,
+        do_sample=True,
+        temperature=0.8,  # Slightly increased for more creative responses
+        top_k=50,
+        top_p=0.95,
+        pad_token_id=finetuned_lm_tokenizer.eos_token_id
+    )
+
+    generated_text = finetuned_lm_tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
+    return generated_text.strip()
+
+def update_emotion_history(emotion, intensity):
+    global emotion_history
+    emotion_history.append({
+        'emotion': emotion,
+        'intensity': intensity,
+        'timestamp': pd.Timestamp.now().isoformat()
+    })
+    save_historical_data(emotion_history)
+
+def get_dominant_emotion():
+    return max(emotions, key=lambda x: emotions[x]['percentage'] if x != 'ideal_state' else 0)
+
+def get_emotion_summary():
+    summary = []
+    for emotion, data in emotions.items():
+        if emotion != 'ideal_state':
+            summary.append(f"{emotion.capitalize()}: {data['percentage']:.1f}% (Intensity: {data['intensity']:.1f})")
+    return "\n".join(summary)
+
+def reset_emotions():
+    global emotions
+    for emotion in emotions:
+        if emotion != 'ideal_state':
+            emotions[emotion]['percentage'] = 10
+            emotions[emotion]['intensity'] = 0
+    emotions['ideal_state']['percentage'] = 100
+    return get_emotion_summary()
+
+def respond_to_user(user_input, chat_history):
+    predicted_emotion = predict_emotion(user_input)
+
+    if predicted_emotion not in emotions:
+        predicted_emotion = 'neutral'
+
+    update_emotion(predicted_emotion, 5, random.uniform(0, 10))
+
+    dominant_emotion = get_dominant_emotion()
+
+    response = generate_text(user_input, chat_history, dominant_emotion)
+
+    update_emotion_history(predicted_emotion, emotions[predicted_emotion]['intensity'])
+
+    chat_history.append((user_input, response))
+
+    if len(chat_history) % 5 == 0:
+        evolve_emotions()
+
+    return response, chat_history, get_emotion_summary()
+
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Adam: The Self-Discovering Emotion-Aware AI Chatbot")
+    gr.Markdown("Chat with Adam, a witty AI assistant trying to figure out its own personality and emotions.")
+
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox(label="Type your message here...")
+    clear = gr.Button("Clear")
+
+    emotion_state = gr.Textbox(label="Adam's Current Emotional State", lines=10)
+    reset_button = gr.Button("Reset Adam's Emotions")
+
+    def user(user_message, history):
+        response, updated_history, emotion_summary = respond_to_user(user_message, history)
+        return "", updated_history, emotion_summary

+    msg.submit(user, [msg, chatbot], [msg, chatbot, emotion_state])
+    clear.click(lambda: None, None, chatbot, queue=False)
+    reset_button.click(reset_emotions, None, emotion_state, queue=False)

 if __name__ == "__main__":
-    assistant = EmotionalAIAssistant()
-    assistant.run_gradio_interface()
+    demo.launch()
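
Below is a minimal smoke test of the refactored module-level API, useful for exercising the change without launching the Gradio UI. This is a sketch, not part of the commit: it assumes the file is importable as a module named `app` and that the two Hugging Face checkpoints (bhadresh-savani/distilbert-base-uncased-emotion and microsoft/DialoGPT-medium) can be downloaded on first use.

# Hypothetical smoke test for the refactored module-level functions (not in the commit).
# Importing `app` loads the DistilBERT emotion classifier eagerly; DialoGPT-medium is
# only pulled on the first call that reaches get_finetuned_lm_model().
import app

history = []
reply, history, summary = app.respond_to_user("I am feeling glorious today", history)

print("Adam:", reply)
print("Emotion summary:")
print(summary)

Because get_finetuned_lm_model() caches the tokenizer and model in module globals, repeated respond_to_user() calls reuse the same DialoGPT instance rather than reloading it.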