Sephfox committed
Commit df44613 · verified · 1 Parent(s): 49ae452

Update app.py

Files changed (1):
  1. app.py +223 -266
app.py CHANGED
@@ -9,276 +9,233 @@ import torch
  from sklearn.ensemble import RandomForestClassifier
  from sklearn.model_selection import train_test_split
  from sklearn.preprocessing import OneHotEncoder
- from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, pipeline
  from deap import base, creator, tools, algorithms
  import gc

  warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')

- # Initialize Example Emotions Dataset
- data = {
-     'context': [
-         'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
-         'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
-         'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
-         'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
-         'I am pessimistic', 'I feel bored', 'I am envious'
-     ],
-     'emotion': [
-         'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
-         'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
-         'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
-     ]
- }
- df = pd.DataFrame(data)
-
- # Encoding the contexts using One-Hot Encoding (memory-efficient)
- encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)
- contexts_encoded = encoder.fit_transform(df[['context']])
-
- # Encoding emotions
- emotions_target = pd.Categorical(df['emotion']).codes
- emotion_classes = pd.Categorical(df['emotion']).categories
-
- # Load pre-trained BERT model for emotion prediction
- emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
- emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion", padding_side='left')
-
- # Lazy loading for the fine-tuned language model (DialoGPT-medium)
- _finetuned_lm_tokenizer = None
- _finetuned_lm_model = None
-
- def get_finetuned_lm_model():
-     global _finetuned_lm_tokenizer, _finetuned_lm_model
-     if _finetuned_lm_tokenizer is None or _finetuned_lm_model is None:
-         model_name = "microsoft/DialoGPT-medium"
-         _finetuned_lm_tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left')
-         _finetuned_lm_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", low_cpu_mem_usage=True)
-         _finetuned_lm_tokenizer.pad_token = _finetuned_lm_tokenizer.eos_token
-     return _finetuned_lm_tokenizer, _finetuned_lm_model
-
- # Enhanced Emotional States
- emotions = {
-     'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-     'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
-     'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-     'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-     'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
-     'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
-     'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-     'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-     'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-     'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-     'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
-     'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
-     'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
-     'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
-     'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
-     'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
-     'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
-     'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
-     'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
-     'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
-     'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
-     'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
-     'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
- }
-
- total_percentage = 200
- emotion_history_file = 'emotion_history.json'
-
- def load_historical_data(file_path=emotion_history_file):
-     if os.path.exists(file_path):
-         with open(file_path, 'r') as file:
-             return json.load(file)
-     return []
-
- def save_historical_data(historical_data, file_path=emotion_history_file):
-     with open(file_path, 'w') as file:
-         json.dump(historical_data, file)
-
- emotion_history = load_historical_data()
-
- def update_emotion(emotion, percentage, intensity):
-     emotions['ideal_state']['percentage'] -= percentage
-     emotions[emotion]['percentage'] += percentage
-     emotions[emotion]['intensity'] = intensity
-
-     # Introduce some randomness in emotional evolution
-     for e in emotions:
-         if e != emotion and e != 'ideal_state':
-             change = random.uniform(-2, 2)
-             emotions[e]['percentage'] = max(0, emotions[e]['percentage'] + change)
-
-     total_current = sum(e['percentage'] for e in emotions.values())
-     adjustment = total_percentage - total_current
-     emotions['ideal_state']['percentage'] += adjustment
-
- def normalize_context(context):
-     return context.lower().strip()
-
- def evaluate(individual):
-     emotion_values = individual[:len(emotions) - 1]
-     intensities = individual[-len(emotions):]
-     ideal_state = individual[-1]
-
-     ideal_diff = abs(100 - ideal_state)
-     sum_non_ideal = sum(emotion_values)
-     intensity_range = max(intensities) - min(intensities)
-
-     return ideal_diff, sum_non_ideal, intensity_range
-
- def evolve_emotions():
-     creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
-     creator.create("Individual", list, fitness=creator.FitnessMulti)
-
-     toolbox = base.Toolbox()
-     toolbox.register("attr_float", random.uniform, 0, 20)
-     toolbox.register("attr_intensity", random.uniform, 0, 10)
-     toolbox.register("individual", tools.initCycle, creator.Individual,
-                      (toolbox.attr_float,) * len(emotions) +
-                      (toolbox.attr_intensity,) * len(emotions) +
-                      (lambda: 100,), n=1)
-     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
-     toolbox.register("mate", tools.cxTwoPoint)
-     toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
-     toolbox.register("select", tools.selNSGA2)
-     toolbox.register("evaluate", evaluate)
-
-     population = toolbox.population(n=100)
-     algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
-                               stats=None, halloffame=None, verbose=False)
-
-     best_individual = tools.selBest(population, k=1)[0]
-     emotion_values = best_individual[:len(emotions)]
-     intensities = best_individual[len(emotions):-1]
-     ideal_state = best_individual[-1]
-
-     for i, emotion in enumerate(emotions):
-         if emotion != 'ideal_state':
-             emotions[emotion]['percentage'] = emotion_values[i]
-             emotions[emotion]['intensity'] = intensities[i]
-
-     emotions['ideal_state']['percentage'] = ideal_state
- def predict_emotion(context):
-     emotion_prediction_pipeline = pipeline('text-classification', model=emotion_prediction_model, tokenizer=emotion_prediction_tokenizer, top_k=None)
-     predictions = emotion_prediction_pipeline(context)
-     emotion_scores = {prediction['label']: prediction['score'] for prediction in predictions[0]}
-     predicted_emotion = max(emotion_scores, key=emotion_scores.get)
-
-     # Map the predicted emotion to our emotion categories
-     emotion_mapping = {
-         'sadness': 'sadness',
-         'joy': 'joy',
-         'love': 'pleasure',
-         'anger': 'anger',
-         'fear': 'fear',
-         'surprise': 'surprise'
-     }
-
-     return emotion_mapping.get(predicted_emotion, 'neutral')
-
- def generate_text(prompt, chat_history, emotion=None, max_length=150):
-     finetuned_lm_tokenizer, finetuned_lm_model = get_finetuned_lm_model()
-
-     full_prompt = (
-         f"You are Adam, a witty AI assistant who's trying to figure out your own personality and emotions. "
-         f"Your current dominant emotion is {emotion}. Respond in a way that reflects this emotion and your "
-         f"ongoing journey of self-discovery. Be clever and engaging:\n\n"
-     )
-     for turn in chat_history[-3:]:  # Consider last 3 turns for context
-         full_prompt += f"Human: {turn[0]}\nAdam: {turn[1]}\n"
-     full_prompt += f"Human: {prompt}\nAdam:"
-
-     input_ids = finetuned_lm_tokenizer.encode(full_prompt + finetuned_lm_tokenizer.eos_token, return_tensors='pt')
-
-     if torch.cuda.is_available():
-         input_ids = input_ids.cuda()
-         finetuned_lm_model = finetuned_lm_model.cuda()
-
-     output = finetuned_lm_model.generate(
-         input_ids,
-         max_length=len(input_ids[0]) + max_length,
-         num_return_sequences=1,
-         no_repeat_ngram_size=2,
-         do_sample=True,
-         temperature=0.8,  # Slightly increased for more creative responses
-         top_k=50,
-         top_p=0.95,
-         pad_token_id=finetuned_lm_tokenizer.eos_token_id
-     )
-
-     generated_text = finetuned_lm_tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
-     return generated_text.strip()
-
- def update_emotion_history(emotion, intensity):
-     global emotion_history
-     emotion_history.append({
-         'emotion': emotion,
-         'intensity': intensity,
-         'timestamp': pd.Timestamp.now().isoformat()
-     })
-     save_historical_data(emotion_history)
-
- def get_dominant_emotion():
-     return max(emotions, key=lambda x: emotions[x]['percentage'] if x != 'ideal_state' else 0)
-
- def get_emotion_summary():
-     summary = []
-     for emotion, data in emotions.items():
-         if emotion != 'ideal_state':
-             summary.append(f"{emotion.capitalize()}: {data['percentage']:.1f}% (Intensity: {data['intensity']:.1f})")
-     return "\n".join(summary)
-
- def reset_emotions():
-     global emotions
-     for emotion in emotions:
-         if emotion != 'ideal_state':
-             emotions[emotion]['percentage'] = 10
-             emotions[emotion]['intensity'] = 0
-     emotions['ideal_state']['percentage'] = 100
-     return get_emotion_summary()
-
- def respond_to_user(user_input, chat_history):
-     predicted_emotion = predict_emotion(user_input)
-
-     if predicted_emotion not in emotions:
-         predicted_emotion = 'neutral'
-
-     update_emotion(predicted_emotion, 5, random.uniform(0, 10))
-
-     dominant_emotion = get_dominant_emotion()
-
-     response = generate_text(user_input, chat_history, dominant_emotion)
-
-     update_emotion_history(predicted_emotion, emotions[predicted_emotion]['intensity'])
-
-     chat_history.append((user_input, response))
-
-     if len(chat_history) % 5 == 0:
-         evolve_emotions()
-
-     return response, chat_history, get_emotion_summary()
-
- # Gradio interface
- with gr.Blocks() as demo:
-     gr.Markdown("# Adam: The Self-Discovering Emotion-Aware AI Chatbot")
-     gr.Markdown("Chat with Adam, a witty AI assistant trying to figure out its own personality and emotions.")
-
-     chatbot = gr.Chatbot()
-     msg = gr.Textbox(label="Type your message here...")
-     clear = gr.Button("Clear")
-
-     emotion_state = gr.Textbox(label="Adam's Current Emotional State", lines=10)
-     reset_button = gr.Button("Reset Adam's Emotions")
-
-     def user(user_message, history):
-         response, updated_history, emotion_summary = respond_to_user(user_message, history)
-         return "", updated_history, emotion_summary
-
-     msg.submit(user, [msg, chatbot], [msg, chatbot, emotion_state])
-     clear.click(lambda: None, None, chatbot, queue=False)
-     reset_button.click(reset_emotions, None, emotion_state, queue=False)

  if __name__ == "__main__":
-     demo.launch()
 
 
  from sklearn.ensemble import RandomForestClassifier
  from sklearn.model_selection import train_test_split
  from sklearn.preprocessing import OneHotEncoder
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, GPTNeoForCausalLM, GPT2Tokenizer, pipeline
  from deap import base, creator, tools, algorithms
  import gc

  warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')

+ class EmotionalAIAssistant:
+     def __init__(self):
+         # Initialize Example Emotions Dataset
+         self.data = {
+             'context': [
+                 'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
+                 'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
+                 'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
+                 'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
+                 'I am pessimistic', 'I feel bored', 'I am envious'
+             ],
+             'emotion': [
+                 'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
+                 'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
+                 'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
+             ]
+         }
+         self.df = pd.DataFrame(self.data)
+
+         # Encoding the contexts using One-Hot Encoding (memory-efficient)
+         self.encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)
+         self.contexts_encoded = self.encoder.fit_transform(self.df[['context']])
+
+         # Encoding emotions
+         self.emotions_target = pd.Categorical(self.df['emotion']).codes
+         self.emotion_classes = pd.Categorical(self.df['emotion']).categories
+
+         # Load pre-trained BERT model for emotion prediction
+         self.emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+         self.emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion", padding_side='left')
+
+         # Load pre-trained GPT-Neo-2.7B model for text generation
+         self.gpt_neo_tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-2.7B')
+         self.gpt_neo_model = GPTNeoForCausalLM.from_pretrained('EleutherAI/gpt-neo-2.7B', device_map='auto')
+
+         # Enhanced Emotional States
+         self.emotions = {
+             'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+             'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
+             'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+             'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+             'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
+             'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
+             'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+             'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+             'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+             'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+             'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
+             'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
+             'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
+             'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
+             'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
+             'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
+             'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
+             'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
+             'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
+             'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
+             'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
+             'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
+             'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
+         }
+
+         self.total_percentage = 200
+         self.emotion_history_file = 'emotion_history.json'
+         self.emotion_history = self.load_historical_data()
+
+     def load_historical_data(self, file_path=None):
+         if file_path is None:
+             file_path = self.emotion_history_file
+         if os.path.exists(file_path):
+             with open(file_path, 'r') as file:
+                 return json.load(file)
+         return []
+
+     def save_historical_data(self, historical_data, file_path=None):
+         if file_path is None:
+             file_path = self.emotion_history_file
+         with open(file_path, 'w') as file:
+             json.dump(historical_data, file)
+
+     def update_emotion(self, emotion, percentage, intensity):
+         self.emotions['ideal_state']['percentage'] -= percentage
+         self.emotions[emotion]['percentage'] += percentage
+         self.emotions[emotion]['intensity'] = intensity
+
+         # Introduce some randomness in emotional evolution
+         for e in self.emotions:
+             if e != emotion and e != 'ideal_state':
+                 change = random.uniform(-2, 2)
+                 self.emotions[e]['percentage'] = max(0, self.emotions[e]['percentage'] + change)
+
+         total_current = sum(e['percentage'] for e in self.emotions.values())
+         adjustment = self.total_percentage - total_current
+         self.emotions['ideal_state']['percentage'] += adjustment
+
+     def normalize_context(self, context):
+         return context.lower().strip()
+
+     def evaluate(self, individual):
+         emotion_values = individual[:len(self.emotions) - 1]
+         intensities = individual[-len(self.emotions):]
+         ideal_state = individual[-1]
+
+         ideal_diff = abs(100 - ideal_state)
+         sum_non_ideal = sum(emotion_values)
+         intensity_range = max(intensities) - min(intensities)
+
+         return ideal_diff, sum_non_ideal, intensity_range
+
+     def evolve_emotions(self):
+         creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
+         creator.create("Individual", list, fitness=creator.FitnessMulti)
+
+         toolbox = base.Toolbox()
+         toolbox.register("attr_float", random.uniform, 0, 20)
+         toolbox.register("attr_intensity", random.uniform, 0, 10)
+         toolbox.register("individual", tools.initCycle, creator.Individual,
+                          (toolbox.attr_float,) * len(self.emotions) +
+                          (toolbox.attr_intensity,) * len(self.emotions) +
+                          (lambda: 100,), n=1)
+         toolbox.register("population", tools.initRepeat, list, toolbox.individual)
+         toolbox.register("mate", tools.cxTwoPoint)
+         toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
+         toolbox.register("select", tools.selNSGA2)
+         toolbox.register("evaluate", self.evaluate)
+
+         population = toolbox.population(n=100)
+         algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
+                                   stats=None, halloffame=None, verbose=False)
+
+         best_individual = tools.selBest(population, k=1)[0]
+         emotion_values = best_individual[:len(self.emotions)]
+         intensities = best_individual[len(self.emotions):-1]
+         ideal_state = best_individual[-1]
+
+         for i, emotion in enumerate(self.emotions):
+             if emotion != 'ideal_state':
+                 self.emotions[emotion]['percentage'] = emotion_values[i]
+                 self.emotions[emotion]['intensity'] = intensities[i]
+
+         self.emotions['ideal_state']['percentage'] = ideal_state
+
+     def generate_text(self, prompt, chat_history, emotion=None, max_length=300):
+         full_prompt = (
+             f"You are Adam, a witty AI assistant who's trying to figure out your own personality and emotions. "
+             f"Your current dominant emotion is {emotion}. Respond in a way that reflects this emotion and your "
+             f"ongoing journey of self-discovery. Be clever, engaging, and insightful:\n\n"
+         )
+         for turn in chat_history[-20:]:  # Consider last 20 turns for context
+             full_prompt += f"Human: {turn[0]}\nAdam: {turn[1]}\n"
+         full_prompt += f"Human: {prompt}\nAdam:"
+
+         input_ids = self.gpt_neo_tokenizer.encode(full_prompt + self.gpt_neo_tokenizer.eos_token, return_tensors='pt')
+
+         if torch.cuda.is_available():
+             input_ids = input_ids.cuda()
+             self.gpt_neo_model = self.gpt_neo_model.cuda()
+
+         output = self.gpt_neo_model.generate(
+             input_ids,
+             max_length=len(input_ids[0]) + max_length,
+             num_return_sequences=1,
+             no_repeat_ngram_size=3,
+             do_sample=True,
+             top_k=50,
+             top_p=0.95,
+             num_beams=2,
+             early_stopping=True,
+         )
+
+         generated_text = self.gpt_neo_tokenizer.decode(output[0], skip_special_tokens=True)
+         return generated_text
+
+     def predict_emotion(self, context):
+         emotion_prediction_pipeline = pipeline('text-classification', model=self.emotion_prediction_model, tokenizer=self.emotion_prediction_tokenizer, top_k=None)
+         predictions = emotion_prediction_pipeline(context)
+         emotion_scores = {prediction['label']: prediction['score'] for prediction in predictions[0]}
+         predicted_emotion = max(emotion_scores, key=emotion_scores.get)
+
+         # Map the predicted emotion to our emotion categories
+         emotion_mapping = {
+             'sadness': 'sadness',
+             'joy': 'joy',
+             'love': 'pleasure',
+             'anger': 'anger',
+             'fear': 'fear',
+             'surprise': 'surprise'
+         }
+
+         return emotion_mapping.get(predicted_emotion, 'neutral')
+
+     def respond_to_user(self, user_message, chat_history):
+         predicted_emotion = self.predict_emotion(user_message)
+         generated_text = self.generate_text(user_message, chat_history, emotion=predicted_emotion)
+         updated_history = chat_history + [(user_message, generated_text)]
+         emotion_summary = {emotion: data['percentage'] for emotion, data in self.emotions.items()}
+         return generated_text, updated_history, emotion_summary
+
+     def run_gradio_interface(self):
+         def user(user_message, history):
+             response, updated_history, emotion_summary = self.respond_to_user(user_message, history)
+             self.evolve_emotions()
+             return response, updated_history, emotion_summary
+
+         iface = gr.Interface(
+             fn=user,
+             inputs=[
+                 gr.Textbox(label="User Message"),
+                 gr.State(value=[], label="Chat History")
+             ],
+             outputs=[
+                 gr.Textbox(label="AI Response"),
+                 gr.State(value=[], label="Updated Chat History"),
+                 gr.JSON(label="Emotion Summary")
+             ],
+             title="AdamZero",
+             description="Chat with an AI assistant that responds based on its emotional state.",
+         )
+
+         iface.launch()

  if __name__ == "__main__":
+     assistant = EmotionalAIAssistant()
+     assistant.run_gradio_interface()
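
For reference, the refactored class can also be driven directly, without launching the Gradio UI. A minimal sketch (the `app` module name is assumed here for illustration and is not part of the commit):

    # Minimal usage sketch, assuming the new app.py shown above is importable as `app`.
    from app import EmotionalAIAssistant

    assistant = EmotionalAIAssistant()  # loads the DistilBERT emotion classifier and GPT-Neo-2.7B
    history = []                        # chat history as a list of (user, assistant) tuples
    reply, history, summary = assistant.respond_to_user("I am excited about this project", history)
    print(reply)                        # generated text (generate_text decodes the full output, prompt included)
    print(summary)                      # dict mapping each emotion to its current percentage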