Update app.py
app.py
CHANGED
@@ -9,234 +9,277 @@ import torch
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import OneHotEncoder
-from transformers import AutoModelForSequenceClassification, AutoTokenizer,
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, pipeline
 from deap import base, creator, tools, algorithms
 import gc

 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')

-[old lines 18-238 deleted; their content is collapsed in this view]
+# Initialize Example Emotions Dataset
+data = {
+    'context': [
+        'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
+        'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
+        'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
+        'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
+        'I am pessimistic', 'I feel bored', 'I am envious'
+    ],
+    'emotion': [
+        'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
+        'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
+        'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
+    ]
+}
+df = pd.DataFrame(data)
+
+# Encoding the contexts using One-Hot Encoding (memory-efficient)
+encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True)  # 'sparse' was renamed to 'sparse_output' in scikit-learn 1.2
+contexts_encoded = encoder.fit_transform(df[['context']])
+
+# Encoding emotions
+emotions_target = pd.Categorical(df['emotion']).codes
+emotion_classes = pd.Categorical(df['emotion']).categories
+
+# Load pre-trained BERT model for emotion prediction
+emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+
+# Lazy loading for the fine-tuned language model (DialoGPT-medium)
+_finetuned_lm_tokenizer = None
+_finetuned_lm_model = None
+
+def get_finetuned_lm_model():
+    global _finetuned_lm_tokenizer, _finetuned_lm_model
+    if _finetuned_lm_tokenizer is None or _finetuned_lm_model is None:
+        model_name = "microsoft/DialoGPT-medium"
+        _finetuned_lm_tokenizer = AutoTokenizer.from_pretrained(model_name)
+        _finetuned_lm_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", low_cpu_mem_usage=True)
+        _finetuned_lm_tokenizer.pad_token = _finetuned_lm_tokenizer.eos_token
+    return _finetuned_lm_tokenizer, _finetuned_lm_model
+
+# Enhanced Emotional States
+emotions = {
+    'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
+    'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+    'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+    'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
+    'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
+    'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
+    'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+    'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
+    'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
+    'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
+    'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
+    'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
+    'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
+    'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
+    'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
+    'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
+    'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
+    'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
+    'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
+    'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
+}
+
+total_percentage = 200
+emotion_history_file = 'emotion_history.json'
+
+def load_historical_data(file_path=emotion_history_file):
+    if os.path.exists(file_path):
+        with open(file_path, 'r') as file:
+            return json.load(file)
+    return []
+
+def save_historical_data(historical_data, file_path=emotion_history_file):
+    with open(file_path, 'w') as file:
+        json.dump(historical_data, file)
+
+emotion_history = load_historical_data()
+
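+# update_emotion pins the sum of all percentages to total_percentage (200):
+# the named emotion gains `percentage`, the other emotions drift by a small
+# random amount, and ideal_state absorbs whatever is left over.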
+def update_emotion(emotion, percentage, intensity):
+    emotions['ideal_state']['percentage'] -= percentage
+    emotions[emotion]['percentage'] += percentage
+    emotions[emotion]['intensity'] = intensity
+
+    # Introduce some randomness in emotional evolution
+    for e in emotions:
+        if e != emotion and e != 'ideal_state':
+            change = random.uniform(-2, 2)
+            emotions[e]['percentage'] = max(0, emotions[e]['percentage'] + change)
+
+    total_current = sum(e['percentage'] for e in emotions.values())
+    adjustment = total_percentage - total_current
+    emotions['ideal_state']['percentage'] += adjustment
+
+def normalize_context(context):
+    return context.lower().strip()
+
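+# GA genome layout (see evolve_emotions below): len(emotions) - 1 percentage
+# genes for the non-ideal emotions, then len(emotions) intensity genes, then
+# a single ideal_state gene initialised to 100.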
+def evaluate(individual):
+    emotion_values = individual[:len(emotions) - 1]
+    intensities = individual[len(emotions) - 1:-1]  # intensity genes only; the last gene is ideal_state
+    ideal_state = individual[-1]
+
+    ideal_diff = abs(100 - ideal_state)
+    sum_non_ideal = sum(emotion_values)
+    intensity_range = max(intensities) - min(intensities)
+
+    return ideal_diff, sum_non_ideal, intensity_range
+
+def evolve_emotions():
+    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
+    creator.create("Individual", list, fitness=creator.FitnessMulti)
+
+    toolbox = base.Toolbox()
+    toolbox.register("attr_float", random.uniform, 0, 20)
+    toolbox.register("attr_intensity", random.uniform, 0, 10)
+    toolbox.register("individual", tools.initCycle, creator.Individual,
+                     (toolbox.attr_float,) * (len(emotions) - 1) +
+                     (toolbox.attr_intensity,) * len(emotions) +
+                     (lambda: 100,), n=1)
+    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
+    toolbox.register("mate", tools.cxTwoPoint)
+    toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
+    toolbox.register("select", tools.selNSGA2)
+    toolbox.register("evaluate", evaluate)
+
+    population = toolbox.population(n=100)
+    algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
+                              stats=None, halloffame=None, verbose=False)
+
+    best_individual = tools.selBest(population, k=1)[0]
+    emotion_values = best_individual[:len(emotions) - 1]
+    intensities = best_individual[len(emotions) - 1:-1]
+    ideal_state = best_individual[-1]
+
+    # Walk the non-ideal emotions with their own index so the percentage and
+    # intensity genes stay aligned (enumerating the dict would run past the
+    # end of emotion_values once 'ideal_state' is skipped).
+    idx = 0
+    for emotion in emotions:
+        if emotion != 'ideal_state':
+            emotions[emotion]['percentage'] = emotion_values[idx]
+            emotions[emotion]['intensity'] = intensities[idx]
+            idx += 1
+
+    emotions['ideal_state']['percentage'] = ideal_state
+
+def predict_emotion(context):
+    emotion_prediction_pipeline = pipeline('text-classification', model=emotion_prediction_model, tokenizer=emotion_prediction_tokenizer, top_k=None)
+    predictions = emotion_prediction_pipeline(context)
+    emotion_scores = {prediction['label']: prediction['score'] for prediction in predictions[0]}
+    predicted_emotion = max(emotion_scores, key=emotion_scores.get)
+
+    # Map the predicted emotion to our emotion categories
+    emotion_mapping = {
+        'sadness': 'sadness',
+        'joy': 'joy',
+        'love': 'pleasure',
+        'anger': 'anger',
+        'fear': 'fear',
+        'surprise': 'surprise'
+    }
+
+    return emotion_mapping.get(predicted_emotion, 'neutral')
+
+def generate_text(prompt, chat_history, emotion=None, max_length=150):
+    finetuned_lm_tokenizer, finetuned_lm_model = get_finetuned_lm_model()
+
+    full_prompt = (
+        f"You are Adam, a witty AI assistant who's trying to figure out your own personality and emotions. "
+        f"Your current dominant emotion is {emotion}. Respond in a way that reflects this emotion and your "
+        f"ongoing journey of self-discovery. Be clever and engaging:\n\n"
+    )
+    for turn in chat_history[-3:]:  # Consider last 3 turns for context
+        full_prompt += f"Human: {turn[0]}\nAdam: {turn[1]}\n"
+    full_prompt += f"Human: {prompt}\nAdam:"
+
+    input_ids = finetuned_lm_tokenizer.encode(full_prompt + finetuned_lm_tokenizer.eos_token, return_tensors='pt')
+
+    if torch.cuda.is_available():
+        input_ids = input_ids.cuda()
+        finetuned_lm_model = finetuned_lm_model.cuda()
+
+    output = finetuned_lm_model.generate(
+        input_ids,
+        max_length=len(input_ids[0]) + max_length,
+        num_return_sequences=1,
+        no_repeat_ngram_size=2,
+        do_sample=True,
+        temperature=0.8,  # Slightly increased for more creative responses
+        top_k=50,
+        top_p=0.95,
+        pad_token_id=finetuned_lm_tokenizer.eos_token_id
+    )
+
+    generated_text = finetuned_lm_tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
+    return generated_text.strip()
+
+def update_emotion_history(emotion, intensity):
+    global emotion_history
+    emotion_history.append({
+        'emotion': emotion,
+        'intensity': intensity,
+        'timestamp': pd.Timestamp.now().isoformat()
+    })
+    save_historical_data(emotion_history)
+
+def get_dominant_emotion():
+    return max(emotions, key=lambda x: emotions[x]['percentage'] if x != 'ideal_state' else 0)
+
+def get_emotion_summary():
+    summary = []
+    for emotion, data in emotions.items():
+        if emotion != 'ideal_state':
+            summary.append(f"{emotion.capitalize()}: {data['percentage']:.1f}% (Intensity: {data['intensity']:.1f})")
+    return "\n".join(summary)
+
+def reset_emotions():
+    global emotions
+    for emotion in emotions:
+        if emotion != 'ideal_state':
+            emotions[emotion]['percentage'] = 10
+            emotions[emotion]['intensity'] = 0
+    emotions['ideal_state']['percentage'] = 100
+    return get_emotion_summary()
+
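+# respond_to_user ties everything together: classify the user's emotion,
+# nudge the emotion state, reply in the current dominant emotion, log the
+# exchange, and re-balance the state with the genetic algorithm every 5 turns.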
+def respond_to_user(user_input, chat_history):
+    predicted_emotion = predict_emotion(user_input)
+
+    if predicted_emotion not in emotions:
+        predicted_emotion = 'neutral'
+
+    update_emotion(predicted_emotion, 5, random.uniform(0, 10))
+
+    dominant_emotion = get_dominant_emotion()
+
+    response = generate_text(user_input, chat_history, dominant_emotion)
+
+    update_emotion_history(predicted_emotion, emotions[predicted_emotion]['intensity'])
+
+    chat_history.append((user_input, response))
+
+    if len(chat_history) % 5 == 0:
+        evolve_emotions()
+
+    return response, chat_history, get_emotion_summary()
+
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Adam: The Self-Discovering Emotion-Aware AI Chatbot")
+    gr.Markdown("Chat with Adam, a witty AI assistant trying to figure out its own personality and emotions.")
+
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox(label="Type your message here...")
+    clear = gr.Button("Clear")
+
+    emotion_state = gr.Textbox(label="Adam's Current Emotional State", lines=10)
+    reset_button = gr.Button("Reset Adam's Emotions")
+
+    def user(user_message, history):
+        response, updated_history, emotion_summary = respond_to_user(user_message, history)
+        return "", updated_history, emotion_summary
+
+    msg.submit(user, [msg, chatbot], [msg, chatbot, emotion_state])
+    clear.click(lambda: None, None, chatbot, queue=False)
+    reset_button.click(reset_emotions, None, emotion_state, queue=False)

 if __name__ == "__main__":
-
-    assistant.run_gradio_interface()
+    demo.launch()
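
For a quick smoke test of the emotion bookkeeping added here, the helpers can be driven without launching the Gradio UI. A minimal sketch, assuming this file is importable as app (importing it downloads the DistilBERT emotion model and builds the Blocks layout) and that you skip the slower DialoGPT generation path:

    import random
    import app  # triggers the model download and Blocks construction at import time

    app.reset_emotions()
    app.update_emotion('joy', 5, random.uniform(0, 10))
    print(app.get_dominant_emotion())
    print(app.get_emotion_summary())
    print(app.predict_emotion("I am thrilled about this release"))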