Sephfox committed on
Commit 3fb843a · verified · 1 Parent(s): 56fc9b2

Upload message (3).txt


This is the model code, fully open source. It is a chatbot built around a 1-billion-parameter (BLOOM-scale) language model; install any necessary libraries before running. It has emotions, plus evolution and intelligence increased to the max.
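Based on the imports in message (3).txt, the script appears to need numpy, pandas, scikit-learn, deap, and transformers, with torch as the transformers backend. A likely install command, inferred from the file rather than an official requirements list:

pip install numpy pandas scikit-learn deap transformers torch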

Files changed (1)
  1. message (3).txt +264 -0
message (3).txt ADDED
@@ -0,0 +1,264 @@
+
+ import numpy as np
+ import pandas as pd
+ import os
+ import json
+ import random
+ from sklearn.ensemble import IsolationForest
+ from sklearn.model_selection import train_test_split
+ from sklearn.preprocessing import OneHotEncoder
+ from sklearn.neural_network import MLPClassifier
+ from deap import base, creator, tools, algorithms
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+
+
+ # Initialize Example Emotions Dataset
+ data = {
+     'context': [
+         'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
+         'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
+         'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
+         'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
+         'I am pessimistic', 'I feel bored', 'I am envious'
+     ],
+     'emotion': [
+         'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
+         'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
+         'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
+     ]
+ }
+ df = pd.DataFrame(data)
+
+
+ # Encoding the contexts using One-Hot Encoding
+ encoder = OneHotEncoder(handle_unknown='ignore')
+ contexts_encoded = encoder.fit_transform(df[['context']]).toarray()
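+ # Note: each whole sentence becomes its own one-hot column, so any context not in the
+ # training list encodes to an all-zero vector (handle_unknown='ignore') and the
+ # classifier's prediction for unseen text is essentially arbitrary.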
+
+
+ # Encoding emotions
+ emotions_target = df['emotion'].astype('category').cat.codes
+ emotion_classes = df['emotion'].astype('category').cat.categories
+
+
+ # Train Neural Network
+ X_train, X_test, y_train, y_test = train_test_split(contexts_encoded, emotions_target, test_size=0.2, random_state=42)
+ model = MLPClassifier(hidden_layer_sizes=(10, 10), max_iter=1000, random_state=42)
+ model.fit(X_train, y_train)
+
+
+ # Isolation Forest Anomaly Detection Model
+ historical_data = np.array([model.predict(contexts_encoded)]).T
+ isolation_forest = IsolationForest(contamination=0.1, random_state=42)
+ isolation_forest.fit(historical_data)
+
+
+ # Emotional States with 20 emotions
+ emotions = {
+     'joy': {'percentage': 10, 'motivation': 'positive'},
+     'pleasure': {'percentage': 10, 'motivation': 'selfish'},
+     'sadness': {'percentage': 10, 'motivation': 'negative'},
+     'grief': {'percentage': 10, 'motivation': 'negative'},
+     'anger': {'percentage': 10, 'motivation': 'traumatic or strong'},
+     'calmness': {'percentage': 10, 'motivation': 'neutral'},
+     'determination': {'percentage': 10, 'motivation': 'positive'},
+     'resentment': {'percentage': 10, 'motivation': 'negative'},
+     'glory': {'percentage': 10, 'motivation': 'positive'},
+     'motivation': {'percentage': 10, 'motivation': 'positive'},
+     'ideal_state': {'percentage': 100, 'motivation': 'balanced'},
+     'fear': {'percentage': 10, 'motivation': 'defensive'},
+     'surprise': {'percentage': 10, 'motivation': 'unexpected'},
+     'anticipation': {'percentage': 10, 'motivation': 'predictive'},
+     'trust': {'percentage': 10, 'motivation': 'reliable'},
+     'disgust': {'percentage': 10, 'motivation': 'repulsive'},
+     'optimism': {'percentage': 10, 'motivation': 'hopeful'},
+     'pessimism': {'percentage': 10, 'motivation': 'doubtful'},
+     'boredom': {'percentage': 10, 'motivation': 'indifferent'},
+     'envy': {'percentage': 10, 'motivation': 'jealous'}
+ }
+
+
+ # Adjust all emotions to a total of 200%
+ total_percentage = 200
+ default_percentage = total_percentage / len(emotions)
+ for emotion in emotions:
+     emotions[emotion]['percentage'] = default_percentage
+
+
+ emotion_history_file = 'emotion_history.json'
+
+
+ # Load historical data from file if exists
+ def load_historical_data(file_path=emotion_history_file):
+     if os.path.exists(file_path):
+         with open(file_path, 'r') as file:
+             return json.load(file)
+     return []
+
+
+ # Save historical data to file
+ def save_historical_data(historical_data, file_path=emotion_history_file):
+     with open(file_path, 'w') as file:
+         json.dump(historical_data, file)
+
+
+ # Load previous emotional states
+ emotion_history = load_historical_data()
+
+
+ # Function to update emotions
+ def update_emotion(emotion, percentage):
+     emotions['ideal_state']['percentage'] -= percentage
+     emotions[emotion]['percentage'] += percentage
+
+     # Ensure total percentage remains 200%
+     total_current = sum(e['percentage'] for e in emotions.values())
+     adjustment = total_percentage - total_current
+     emotions['ideal_state']['percentage'] += adjustment
+
+
+ # Function to normalize context
+ def normalize_context(context):
+     return context.lower().strip()
+
+
+ # Function to evolve emotions using genetic algorithm
+ def evolve_emotions():
+     # Define the fitness function
+     def evaluate(individual):
+         ideal_state = individual[-1]  # Last value is the ideal state percentage
+         other_emotions = individual[:-1]  # All other emotions
+         return abs(ideal_state - 100), sum(other_emotions)
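+     # Both fitness objectives are minimized: keep the ideal-state gene near 100 and keep
+     # the total weight spread across the remaining emotions as small as possible.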
+
+     # Register the genetic algorithm components (guard against re-registering on repeat calls)
+     if not hasattr(creator, "FitnessMin"):
+         creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
+         creator.create("Individual", list, fitness=creator.FitnessMin)
+
+     # Create individuals and population: one gene per non-ideal emotion plus a final
+     # ideal-state gene (DEAP has no tools.initConcat, so initIterate builds the list directly)
+     toolbox = base.Toolbox()
+     toolbox.register("attribute", lambda: random.uniform(0, 20))
+     toolbox.register("ideal_state", lambda: random.uniform(80, 120))
+     toolbox.register(
+         "individual", tools.initIterate, creator.Individual,
+         lambda: [toolbox.attribute() for _ in range(len(emotions) - 1)] + [toolbox.ideal_state()]
+     )
+     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
+
+     # Register genetic operators
+     toolbox.register("evaluate", evaluate)
+     toolbox.register("mate", tools.cxBlend, alpha=0.5)
+     toolbox.register("mutate", tools.mutGaussian, mu=10, sigma=5, indpb=0.3)
+     toolbox.register("select", tools.selTournament, tournsize=3)
+
+     # Initialize the population
+     population = toolbox.population(n=10)
+
+     # Run genetic algorithm
+     population, log = algorithms.eaSimple(population, toolbox, cxpb=0.5, mutpb=0.2, ngen=20, verbose=False)
+
+     # Update the emotions with the best individual: the last gene is the ideal state,
+     # the rest map to the other emotions in their dictionary order
+     best_individual = tools.selBest(population, k=1)[0]
+     other_emotion_names = [name for name in emotions if name != 'ideal_state']
+     for idx, emotion in enumerate(other_emotion_names):
+         emotions[emotion]['percentage'] = best_individual[idx]
+     emotions['ideal_state']['percentage'] = best_individual[-1]
+
+
+ # Function to get emotional response
+ def get_emotional_response(context):
+     # Normalize context
+     context = normalize_context(context)
+
+     # Encode the context and predict the emotion using the neural network
+     context_encoded = encoder.transform([[context]]).toarray()
+     prediction = model.predict(context_encoded)
+     predicted_emotion = emotion_classes[prediction[0]]
+
+     # Check for anomalies using Isolation Forest
+     anomaly_score = isolation_forest.decision_function([prediction])[0]
+     if anomaly_score < -0.5:
+         print("Anomalous context detected. Adjusting emotional response.")
+         update_emotion('calmness', 20)
+     else:
+         # Define emotional responses: joy and sadness also boost a paired secondary emotion,
+         # any other recognized emotion is boosted directly, and anything else defaults to calmness
+         if predicted_emotion == 'joy':
+             update_emotion('joy', 20)
+             update_emotion('pleasure', 20)
+         elif predicted_emotion == 'sadness':
+             update_emotion('sadness', 20)
+             update_emotion('grief', 20)
+         elif predicted_emotion in emotions:
+             update_emotion(predicted_emotion, 20)
+         else:
+             update_emotion('calmness', 20)
+
+     # Record the current emotional state in history
+     emotion_state = {emotion: data['percentage'] for emotion, data in emotions.items()}
+     emotion_history.append(emotion_state)
+
+     # Save the history to file
+     save_historical_data(emotion_history)
+
+     # Print the current emotional state
+     for emotion, data in emotions.items():
+         print(f"{emotion.capitalize()}: {data['percentage']}% ({data['motivation']} motivation)")
+
+
+ # Function to handle idle state using genetic algorithm
+ def handle_idle_state():
+     print("Entering idle state...")
+     evolve_emotions()
+     print("Emotions evolved")
+     for emotion, data in emotions.items():
+         print(f"{emotion.capitalize()}: {data['percentage']}% ({data['motivation']} motivation)")
+
+
+ # S.O.U.L. (Self-Organizing Universal Learning) Function
+ class SOUL:
+     def __init__(self, gpt2_model='gpt2'):
+         self.gpt2_tokenizer = GPT2Tokenizer.from_pretrained(gpt2_model)
+         self.gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_model)
+
+     def generate_text(self, prompt, max_length=50):
+         inputs = self.gpt2_tokenizer.encode(prompt, return_tensors='pt')
+         outputs = self.gpt2_model.generate(inputs, max_length=max_length, num_return_sequences=1, no_repeat_ngram_size=2)
+         return self.gpt2_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     def bridge_ai(self, prompt):
+         # Generate the response using GPT-2
+         print("\nGPT-2 Response:")
+         gpt2_response = self.generate_text(prompt)
+         print(gpt2_response)
+
+         # Get the emotional response
+         print("\nEmotional Response:")
+         get_emotional_response(gpt2_response)
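+ # Note: from_pretrained() downloads the GPT-2 weights and tokenizer files on first use,
+ # so the first run needs network access (and torch installed as the transformers backend).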
+
+
+ # Example usage of S.O.U.L. function
+ soul = SOUL()
+
+
+ # Test open-ended conversation with emotional response
+ while True:
+     user_input = input("You: ")
+     soul.bridge_ai(user_input)