import warnings
import numpy as np
import pandas as pd
import os
import json
import random
import gradio as gr
import torch
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, pipeline
from deap import base, creator, tools, algorithms
import gc

warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')

class EmotionalAIAssistant:
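    """Chat assistant that predicts the user's emotion, keeps an evolving internal
    emotional state, and generates emotion-conditioned replies via a Gradio interface."""
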
    def __init__(self):
        # Initialize Example Emotions Dataset
        self.data = {
            'context': [
                'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
                'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
                'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
                'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
                'I am pessimistic', 'I feel bored', 'I am envious'
            ],
            'emotion': [
                'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
                'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
                'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
            ]
        }
        self.df = pd.DataFrame(self.data)

        # Encoding the contexts using One-Hot Encoding (memory-efficient)
        self.encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True)  # 'sparse' was renamed to 'sparse_output' in scikit-learn 1.2
        self.contexts_encoded = self.encoder.fit_transform(self.df[['context']])

        # Encoding emotions
        self.emotions_target = pd.Categorical(self.df['emotion']).codes
        self.emotion_classes = pd.Categorical(self.df['emotion']).categories

        # Load pre-trained BERT model for emotion prediction
        self.emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
        self.emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion", padding_side='left')
 
        # Load a pre-trained causal language model for text generation.
        # Note (assumption): transformers does not ship MegatronLMHeadModel/MegatronTokenizer
        # classes, so the checkpoint is loaded through the Auto classes; this assumes the
        # checkpoint is available in a Hugging Face-compatible format.
        self.megatron_tokenizer = AutoTokenizer.from_pretrained('nvidia/megatron-lm-330m')
        self.megatron_model = AutoModelForCausalLM.from_pretrained('nvidia/megatron-lm-330m', device_map='auto')

        # Enhanced emotional states
        self.emotions = {
            'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
            'pleasure': {'percentage': 10, 'motivation': 'selfish', 'intensity': 0},
            'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
            'grief': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
            'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
            'calmness': {'percentage': 10, 'motivation': 'neutral', 'intensity': 0},
            'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
            'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
            'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
            'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
            'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
            'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
            'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
            'anticipation': {'percentage': 10, 'motivation': 'predictive', 'intensity': 0},
            'trust': {'percentage': 10, 'motivation': 'reliable', 'intensity': 0},
            'disgust': {'percentage': 10, 'motivation': 'repulsive', 'intensity': 0},
            'optimism': {'percentage': 10, 'motivation': 'hopeful', 'intensity': 0},
            'pessimism': {'percentage': 10, 'motivation': 'doubtful', 'intensity': 0},
            'boredom': {'percentage': 10, 'motivation': 'indifferent', 'intensity': 0},
            'envy': {'percentage': 10, 'motivation': 'jealous', 'intensity': 0},
            'neutral': {'percentage': 10, 'motivation': 'balanced', 'intensity': 0},
            'wit': {'percentage': 15, 'motivation': 'clever', 'intensity': 0},
            'curiosity': {'percentage': 20, 'motivation': 'inquisitive', 'intensity': 0},
        }

        self.total_percentage = 200
        self.emotion_history_file = 'emotion_history.json'
        self.emotion_history = self.load_historical_data()

    def load_historical_data(self, file_path=None):
        if file_path is None:
            file_path = self.emotion_history_file
        if os.path.exists(file_path):
            with open(file_path, 'r') as file:
                return json.load(file)
        return []

    def save_historical_data(self, historical_data, file_path=None):
        if file_path is None:
            file_path = self.emotion_history_file
        with open(file_path, 'w') as file:
            json.dump(historical_data, file)

    def update_emotion(self, emotion, percentage, intensity):
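        """Shift `percentage` points from the ideal state to `emotion`, apply small random
        drift to the other emotions, then rebalance so the total matches self.total_percentage."""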
        self.emotions['ideal_state']['percentage'] -= percentage
        self.emotions[emotion]['percentage'] += percentage
        self.emotions[emotion]['intensity'] = intensity

        # Introduce some randomness in emotional evolution
        for e in self.emotions:
            if e != emotion and e != 'ideal_state':
                change = random.uniform(-2, 2)
                self.emotions[e]['percentage'] = max(0, self.emotions[e]['percentage'] + change)

        total_current = sum(e['percentage'] for e in self.emotions.values())
        adjustment = self.total_percentage - total_current
        self.emotions['ideal_state']['percentage'] += adjustment

    def normalize_context(self, context):
        return context.lower().strip()

    def evaluate(self, individual):
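        """Multi-objective fitness: an individual is laid out as
        [per-emotion percentages..., per-emotion intensities..., ideal_state],
        and all three returned objectives are minimized."""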
        emotion_values = individual[:len(self.emotions)]
        intensities = individual[len(self.emotions):-1]
        ideal_state = individual[-1]
        
        ideal_diff = abs(100 - ideal_state)
        sum_non_ideal = sum(emotion_values)
        intensity_range = max(intensities) - min(intensities)
        
        return ideal_diff, sum_non_ideal, intensity_range

    def evolve_emotions(self):
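        """Evolve the emotional state with DEAP (NSGA-II selection, mu+lambda strategy) and
        write the best individual's percentages and intensities back into self.emotions."""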
        creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
        creator.create("Individual", list, fitness=creator.FitnessMulti)

        toolbox = base.Toolbox()
        toolbox.register("attr_float", random.uniform, 0, 20)
        toolbox.register("attr_intensity", random.uniform, 0, 10)
        toolbox.register("individual", tools.initCycle, creator.Individual,
                         (toolbox.attr_float,) * len(self.emotions) +
                         (toolbox.attr_intensity,) * len(self.emotions) +
                         (lambda: 100,), n=1)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
        toolbox.register("select", tools.selNSGA2)
        toolbox.register("evaluate", self.evaluate)

        population = toolbox.population(n=100)
        algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=100,
                                   stats=None, halloffame=None, verbose=False)

        best_individual = tools.selBest(population, k=1)[0]
        emotion_values = best_individual[:len(self.emotions)]
        intensities = best_individual[len(self.emotions):-1]
        ideal_state = best_individual[-1]

        for i, emotion in enumerate(self.emotions):
            if emotion != 'ideal_state':
                self.emotions[emotion]['percentage'] = emotion_values[i]
                self.emotions[emotion]['intensity'] = intensities[i]

        self.emotions['ideal_state']['percentage'] = ideal_state

    def generate_text(self, prompt, chat_history, emotion=None, max_length=300):
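        """Build a persona prompt from the dominant emotion and the recent chat history,
        then sample a continuation from the causal language model."""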
        full_prompt = (
            f"You are Adam, a witty AI assistant who's trying to figure out your own personality and emotions. "
            f"Your current dominant emotion is {emotion}. Respond in a way that reflects this emotion and your "
            f"ongoing journey of self-discovery. Be clever, engaging, and insightful:\n\n"
        )
        for turn in chat_history[-20:]:  # Consider last 20 turns for context
            full_prompt += f"Human: {turn[0]}\nAdam: {turn[1]}\n"
        full_prompt += f"Human: {prompt}\nAdam:"

        input_ids = self.megatron_tokenizer.encode(full_prompt + self.megatron_tokenizer.eos_token, return_tensors='pt')

        if torch.cuda.is_available():
            input_ids = input_ids.cuda()
            self.megatron_model = self.megatron_model.cuda()

        output = self.megatron_model.generate(
            input_ids,
            max_length=len(input_ids[0]) + max_length,
            num_return_sequences=1,
            no_repeat_ngram_size=3,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            num_beams=2,
            early_stopping=True,
        )

        # Decode only the newly generated tokens so the prompt is not echoed back in the reply
        generated_text = self.megatron_tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
        return generated_text.strip()

    def predict_emotion(self, context):
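        """Classify `context` with the DistilBERT emotion model and map the predicted label
        onto this assistant's emotion categories, falling back to 'neutral'.
        The pipeline is rebuilt on every call; consider caching it if latency matters."""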
        emotion_prediction_pipeline = pipeline('text-classification', model=self.emotion_prediction_model, tokenizer=self.emotion_prediction_tokenizer, top_k=None)
        predictions = emotion_prediction_pipeline(context)
        emotion_scores = {prediction['label']: prediction['score'] for prediction in predictions[0]}
        predicted_emotion = max(emotion_scores, key=emotion_scores.get)
        
        # Map the predicted emotion to our emotion categories
        emotion_mapping = {
            'sadness': 'sadness',
            'joy': 'joy',
            'love': 'pleasure',
            'anger': 'anger',
            'fear': 'fear',
            'surprise': 'surprise'
        }
        
        return emotion_mapping.get(predicted_emotion, 'neutral')

    def respond_to_user(self, user_message, chat_history):
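        """Predict the emotion of `user_message`, generate a reply conditioned on it, and
        return the reply, the updated chat history, and a snapshot of emotion percentages."""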
        predicted_emotion = self.predict_emotion(user_message)
        generated_text = self.generate_text(user_message, chat_history, emotion=predicted_emotion)
        updated_history = chat_history + [(user_message, generated_text)]
        emotion_summary = {emotion: data['percentage'] for emotion, data in self.emotions.items()}
        return generated_text, updated_history, emotion_summary

    def run_gradio_interface(self):
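        """Launch a minimal Gradio interface; each turn also evolves the emotional state."""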
        def user(user_message, history):
            response, updated_history, emotion_summary = self.respond_to_user(user_message, history)
            self.evolve_emotions()
            return response, updated_history, emotion_summary

        iface = gr.Interface(
            fn=user,
            inputs=[
                gr.Textbox(label="User Message"),
                gr.State(value=[], label="Chat History")
            ],
            outputs=[
                gr.Textbox(label="AI Response"),
                gr.State(value=[], label="Updated Chat History"),
                gr.JSON(label="Emotion Summary")
            ],
            title="AdamZero",
            description="Chat with an AI assistant that responds based on its emotional state.",
        )

        iface.launch()


if __name__ == "__main__":
    assistant = EmotionalAIAssistant()
    assistant.run_gradio_interface()
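
# Illustrative usage without the Gradio UI (assumes the model downloads above succeed;
# chat history is a list of (user_message, reply) tuples):
#
#   assistant = EmotionalAIAssistant()
#   reply, history, emotions = assistant.respond_to_user("I just got some great news!", [])
#   print(reply)
#   print(emotions)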