Update app.py
app.py
CHANGED
@@ -9,276 +9,233 @@ import torch
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import OneHotEncoder
-from transformers import AutoModelForSequenceClassification, AutoTokenizer,
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, GPTNeoForCausalLM, GPT2Tokenizer, pipeline
 from deap import base, creator, tools, algorithms
 import gc
 
 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
 
-... [old lines 18-208 removed; content not captured in this view]
-)
-... [old lines 210-237 removed; content not captured in this view]
-        emotions[emotion]['intensity'] = 0
-    emotions['ideal_state']['percentage'] = 100
-    return get_emotion_summary()
-
-def respond_to_user(user_input, chat_history):
-    predicted_emotion = predict_emotion(user_input)
-
-    if predicted_emotion not in emotions:
-        predicted_emotion = 'neutral'
-
-    update_emotion(predicted_emotion, 5, random.uniform(0, 10))
-
-    dominant_emotion = get_dominant_emotion()
-
-    response = generate_text(user_input, chat_history, dominant_emotion)
-
-    update_emotion_history(predicted_emotion, emotions[predicted_emotion]['intensity'])
-
-    chat_history.append((user_input, response))
-
-    if len(chat_history) % 5 == 0:
-        evolve_emotions()
-
-    return response, chat_history, get_emotion_summary()
-
-# Gradio interface
-with gr.Blocks() as demo:
-    gr.Markdown("# Adam: The Self-Discovering Emotion-Aware AI Chatbot")
-    gr.Markdown("Chat with Adam, a witty AI assistant trying to figure out its own personality and emotions.")
-
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox(label="Type your message here...")
-    clear = gr.Button("Clear")
-
-    emotion_state = gr.Textbox(label="Adam's Current Emotional State", lines=10)
-    reset_button = gr.Button("Reset Adam's Emotions")
-
-    def user(user_message, history):
-        response, updated_history, emotion_summary = respond_to_user(user_message, history)
-        return "", updated_history, emotion_summary
-
-    msg.submit(user, [msg, chatbot], [msg, chatbot, emotion_state])
-    clear.click(lambda: None, None, chatbot, queue=False)
-    reset_button.click(reset_emotions, None, emotion_state, queue=False)
+class EmotionalAIAssistant:
+    def __init__(self):
+        # Initialize Example Emotions Dataset
+        self.data = {
+            'context': [
+                'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
+                'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
+                'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
+                'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
+                'I am pessimistic', 'I feel bored', 'I am envious'
+            ],
+            'emotion': [
+                'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
+                'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
+                'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
+            ]
+        }
+        self.df = pd.DataFrame(self.data)
+
+        # Encoding the contexts using One-Hot Encoding (memory-efficient)
+        self.encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)
+        self.contexts_encoded = self.encoder.fit_transform(self.df[['context']])
+
+        # Encoding emotions
+        self.emotions_target = pd.Categorical(self.df['emotion']).codes
+        self.emotion_classes = pd.Categorical(self.df['emotion']).categories
+
+        # Load pre-trained BERT model for emotion prediction
+        self.emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+        self.emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion", padding_side='left')
+
+        # Load pre-trained GPT-Neo-2.7B model for text generation
+        self.gpt_neo_tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-2.7B')
+        self.gpt_neo_model = GPTNeoForCausalLM.from_pretrained('EleutherAI/gpt-neo-2.7B', device_map='auto')
+
+        # Enhanced Emotional States
+        self.emotions = {
+            'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+            'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
+            'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+            'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+            'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
+            'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
+            'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+            'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+            'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+            'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+            'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
+            'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
+            'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
+            'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
+            'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
+            'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
+            'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
+            'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
+            'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
+            'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
+            'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
+            'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
+            'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
+        }
+
+        self.total_percentage = 200
+        self.emotion_history_file = 'emotion_history.json'
+        self.emotion_history = self.load_historical_data()
+
+    def load_historical_data(self, file_path=None):
+        if file_path is None:
+            file_path = self.emotion_history_file
+        if os.path.exists(file_path):
+            with open(file_path, 'r') as file:
+                return json.load(file)
+        return []
+
+    def save_historical_data(self, historical_data, file_path=None):
+        if file_path is None:
+            file_path = self.emotion_history_file
+        with open(file_path, 'w') as file:
+            json.dump(historical_data, file)
+
+    def update_emotion(self, emotion, percentage, intensity):
+        self.emotions['ideal_state']['percentage'] -= percentage
+        self.emotions[emotion]['percentage'] += percentage
+        self.emotions[emotion]['intensity'] = intensity
+
+        # Introduce some randomness in emotional evolution
+        for e in self.emotions:
+            if e != emotion and e != 'ideal_state':
+                change = random.uniform(-2, 2)
+                self.emotions[e]['percentage'] = max(0, self.emotions[e]['percentage'] + change)
+
+        total_current = sum(e['percentage'] for e in self.emotions.values())
+        adjustment = self.total_percentage - total_current
+        self.emotions['ideal_state']['percentage'] += adjustment
+
+    def normalize_context(self, context):
+        return context.lower().strip()
+
+    def evaluate(self, individual):
+        # Genome layout: [percentages x len(emotions)] + [intensities x len(emotions)] + [ideal_state]
+        emotion_values = individual[:len(self.emotions)]
+        intensities = individual[len(self.emotions):-1]
+        ideal_state = individual[-1]
+
+        ideal_diff = abs(100 - ideal_state)
+        sum_non_ideal = sum(emotion_values)
+        intensity_range = max(intensities) - min(intensities)
+
+        return ideal_diff, sum_non_ideal, intensity_range
+
+    def evolve_emotions(self):
+        creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
+        creator.create("Individual", list, fitness=creator.FitnessMulti)
+
+        toolbox = base.Toolbox()
+        toolbox.register("attr_float", random.uniform, 0, 20)
+        toolbox.register("attr_intensity", random.uniform, 0, 10)
+        toolbox.register("individual", tools.initCycle, creator.Individual,
+                         (toolbox.attr_float,) * len(self.emotions) +
+                         (toolbox.attr_intensity,) * len(self.emotions) +
+                         (lambda: 100,), n=1)
+        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
+        toolbox.register("mate", tools.cxTwoPoint)
+        toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
+        toolbox.register("select", tools.selNSGA2)
+        toolbox.register("evaluate", self.evaluate)
+
+        population = toolbox.population(n=100)
+        algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
+                                  stats=None, halloffame=None, verbose=False)
+
+        best_individual = tools.selBest(population, k=1)[0]
+        emotion_values = best_individual[:len(self.emotions)]
+        intensities = best_individual[len(self.emotions):-1]
+        ideal_state = best_individual[-1]
+
+        for i, emotion in enumerate(self.emotions):
+            if emotion != 'ideal_state':
+                self.emotions[emotion]['percentage'] = emotion_values[i]
+                self.emotions[emotion]['intensity'] = intensities[i]
+
+        self.emotions['ideal_state']['percentage'] = ideal_state
+
+    def generate_text(self, prompt, chat_history, emotion=None, max_length=300):
+        full_prompt = (
+            f"You are Adam, a witty AI assistant who's trying to figure out your own personality and emotions. "
+            f"Your current dominant emotion is {emotion}. Respond in a way that reflects this emotion and your "
+            f"ongoing journey of self-discovery. Be clever, engaging, and insightful:\n\n"
+        )
+        for turn in chat_history[-20:]:  # Consider last 20 turns for context
+            full_prompt += f"Human: {turn[0]}\nAdam: {turn[1]}\n"
+        full_prompt += f"Human: {prompt}\nAdam:"
+
+        input_ids = self.gpt_neo_tokenizer.encode(full_prompt + self.gpt_neo_tokenizer.eos_token, return_tensors='pt')
+
+        if torch.cuda.is_available():
+            input_ids = input_ids.cuda()
+            self.gpt_neo_model = self.gpt_neo_model.cuda()
+
+        output = self.gpt_neo_model.generate(
+            input_ids,
+            max_length=len(input_ids[0]) + max_length,
+            num_return_sequences=1,
+            no_repeat_ngram_size=3,
+            do_sample=True,
+            top_k=50,
+            top_p=0.95,
+            num_beams=2,
+            early_stopping=True,
+        )
+
+        generated_text = self.gpt_neo_tokenizer.decode(output[0], skip_special_tokens=True)
+        return generated_text
+
+    def predict_emotion(self, context):
+        emotion_prediction_pipeline = pipeline('text-classification', model=self.emotion_prediction_model, tokenizer=self.emotion_prediction_tokenizer, top_k=None)
+        predictions = emotion_prediction_pipeline(context)
+        emotion_scores = {prediction['label']: prediction['score'] for prediction in predictions[0]}
+        predicted_emotion = max(emotion_scores, key=emotion_scores.get)
+
+        # Map the predicted emotion to our emotion categories
+        emotion_mapping = {
+            'sadness': 'sadness',
+            'joy': 'joy',
+            'love': 'pleasure',
+            'anger': 'anger',
+            'fear': 'fear',
+            'surprise': 'surprise'
+        }
+
+        return emotion_mapping.get(predicted_emotion, 'neutral')
+
+    def respond_to_user(self, user_message, chat_history):
+        predicted_emotion = self.predict_emotion(user_message)
+        generated_text = self.generate_text(user_message, chat_history, emotion=predicted_emotion)
+        updated_history = chat_history + [(user_message, generated_text)]
+        emotion_summary = {emotion: data['percentage'] for emotion, data in self.emotions.items()}
+        return generated_text, updated_history, emotion_summary
+
+    def run_gradio_interface(self):
+        def user(user_message, history):
+            response, updated_history, emotion_summary = self.respond_to_user(user_message, history)
+            self.evolve_emotions()
+            return response, updated_history, emotion_summary
+
+        iface = gr.Interface(
+            fn=user,
+            inputs=[
+                gr.Textbox(label="User Message"),
+                gr.State(value=[], label="Chat History")
+            ],
+            outputs=[
+                gr.Textbox(label="AI Response"),
+                gr.State(value=[], label="Updated Chat History"),
+                gr.JSON(label="Emotion Summary")
+            ],
+            title="AdamZero",
+            description="Chat with an AI assistant that responds based on its emotional state.",
+        )
+
+        iface.launch()
 
 if __name__ == "__main__":
-... [old line 284 removed; content not captured in this view]
+    assistant = EmotionalAIAssistant()
+    assistant.run_gradio_interface()
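
Reviewer note: the emotion-prediction path can be sanity-checked without downloading the GPT-Neo-2.7B generator. Below is a minimal standalone sketch of the classification-and-mapping step from predict_emotion above; it reuses the model name and label mapping shown in the diff, assumes a recent transformers release (for the top_k argument), and the function and variable names here are illustrative only, not part of app.py.

# Standalone sketch of the emotion-prediction step (illustrative only).
from transformers import pipeline

# Same classifier and call pattern as EmotionalAIAssistant.predict_emotion:
# top_k=None at construction time returns a score for every label.
emotion_pipeline = pipeline(
    'text-classification',
    model="bhadresh-savani/distilbert-base-uncased-emotion",
    top_k=None,
)

# Same mapping from classifier labels to the app's emotion categories.
emotion_mapping = {
    'sadness': 'sadness', 'joy': 'joy', 'love': 'pleasure',
    'anger': 'anger', 'fear': 'fear', 'surprise': 'surprise',
}

def predict_emotion(context):
    predictions = emotion_pipeline(context)
    scores = {p['label']: p['score'] for p in predictions[0]}
    top_label = max(scores, key=scores.get)
    return emotion_mapping.get(top_label, 'neutral')

if __name__ == "__main__":
    print(predict_emotion("I am thrilled about this release!"))  # expected to map to 'joy'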
|