import numpy as np
import pandas as pd
import os
import json
import random
import gradio as gr
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.neural_network import MLPClassifier
from deap import base, creator, tools, algorithms
from transformers import BloomForCausalLM, BloomTokenizerFast
import torch
import torch.multiprocessing as mp
# Initialize Example Emotions Dataset
data = {
    'context': [
        'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
        'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
        'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
        'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
        'I am pessimistic', 'I feel bored', 'I am envious'
    ],
    'emotion': [
        'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
        'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
        'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
    ]
}
df = pd.DataFrame(data)
# Encoding the contexts using One-Hot Encoding
encoder = OneHotEncoder(handle_unknown='ignore')
contexts_encoded = encoder.fit_transform(df[['context']]).toarray()

# Encoding emotions as integer category codes
emotions_target = df['emotion'].astype('category').cat.codes
emotion_classes = df['emotion'].astype('category').cat.categories

# Train Neural Network
X_train, X_test, y_train, y_test = train_test_split(contexts_encoded, emotions_target, test_size=0.2, random_state=42)
model = MLPClassifier(hidden_layer_sizes=(10, 10), max_iter=1000, random_state=42)
model.fit(X_train, y_train)
# Isolation Forest Anomaly Detection Model
historical_data = np.array(model.predict(contexts_encoded)).reshape(-1, 1)
isolation_forest = IsolationForest(contamination=0.1, random_state=42)
isolation_forest.fit(historical_data)
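# Note (descriptive): IsolationForest.decision_function returns lower scores for
# more anomalous samples (negative values indicate outliers). The "historical"
# data fitted here is simply the column of class indices the classifier predicts
# for the training contexts; get_emotional_response below treats scores under
# -0.5 as anomalous.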
# Emotional States with 20 emotions
emotions = {
    'joy': {'percentage': 10, 'motivation': 'positive'},
    'pleasure': {'percentage': 10, 'motivation': 'selfish'},
    'sadness': {'percentage': 10, 'motivation': 'negative'},
    'grief': {'percentage': 10, 'motivation': 'negative'},
    'anger': {'percentage': 10, 'motivation': 'traumatic or strong'},
    'calmness': {'percentage': 10, 'motivation': 'neutral'},
    'determination': {'percentage': 10, 'motivation': 'positive'},
    'resentment': {'percentage': 10, 'motivation': 'negative'},
    'glory': {'percentage': 10, 'motivation': 'positive'},
    'motivation': {'percentage': 10, 'motivation': 'positive'},
    'ideal_state': {'percentage': 100, 'motivation': 'balanced'},
    'fear': {'percentage': 10, 'motivation': 'defensive'},
    'surprise': {'percentage': 10, 'motivation': 'unexpected'},
    'anticipation': {'percentage': 10, 'motivation': 'predictive'},
    'trust': {'percentage': 10, 'motivation': 'reliable'},
    'disgust': {'percentage': 10, 'motivation': 'repulsive'},
    'optimism': {'percentage': 10, 'motivation': 'hopeful'},
    'pessimism': {'percentage': 10, 'motivation': 'doubtful'},
    'boredom': {'percentage': 10, 'motivation': 'indifferent'},
    'envy': {'percentage': 10, 'motivation': 'jealous'}
}

# Adjust all emotions to a total of 200%
total_percentage = 200
default_percentage = total_percentage / len(emotions)
for emotion in emotions:
    emotions[emotion]['percentage'] = default_percentage
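# Worked example: with 20 emotional states sharing a 200% budget, each state
# starts at 200 / 20 = 10%. Note that this loop also resets 'ideal_state' from
# its initial 100% down to the same 10% default.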
emotion_history_file = 'emotion_history.json'

# Load historical data from file if it exists
def load_historical_data(file_path=emotion_history_file):
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            return json.load(file)
    return []

# Save historical data to file
def save_historical_data(historical_data, file_path=emotion_history_file):
    with open(file_path, 'w') as file:
        json.dump(historical_data, file)

# Load previous emotional states
emotion_history = load_historical_data()
# Function to update emotions
def update_emotion(emotion, percentage):
    emotions['ideal_state']['percentage'] -= percentage
    emotions[emotion]['percentage'] += percentage

    # Ensure total percentage remains 200%
    total_current = sum(e['percentage'] for e in emotions.values())
    adjustment = total_percentage - total_current
    emotions['ideal_state']['percentage'] += adjustment
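# Illustrative example: update_emotion('joy', 20) moves 20 percentage points
# from 'ideal_state' to 'joy', then recomputes the grand total and corrects
# 'ideal_state' so the sum across all states stays at exactly 200%.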
# Function to normalize context
def normalize_context(context):
    return context.lower().strip()
# Function to evolve emotions using a genetic algorithm
def evolve_emotions():
    # Fitness: keep the ideal state near 100% and keep the other emotions low
    def evaluate(individual):
        ideal_state = individual[-1]      # Last gene is the ideal-state percentage
        other_emotions = individual[:-1]  # All other emotion percentages
        return abs(ideal_state - 100), sum(other_emotions)

    # Register the genetic algorithm components (guarded so repeated calls do not re-create them)
    if not hasattr(creator, "FitnessMin"):
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
        creator.create("Individual", list, fitness=creator.FitnessMin)

    # An individual holds one gene per non-ideal emotion plus a final ideal-state gene
    def create_individual():
        genes = [random.uniform(0, 20) for _ in range(len(emotions) - 1)]
        genes.append(random.uniform(80, 120))  # ideal_state gene
        return creator.Individual(genes)

    toolbox = base.Toolbox()
    toolbox.register("individual", create_individual)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Register genetic operators
    toolbox.register("evaluate", evaluate)
    toolbox.register("mate", tools.cxBlend, alpha=0.5)
    toolbox.register("mutate", tools.mutGaussian, mu=10, sigma=5, indpb=0.3)
    toolbox.register("select", tools.selTournament, tournsize=3)

    # Initialize the population
    population = toolbox.population(n=10)

    # Run the genetic algorithm
    population, log = algorithms.eaSimple(population, toolbox, cxpb=0.5, mutpb=0.2, ngen=20, verbose=False)

    # Update the emotions with the best individual (last gene maps to 'ideal_state')
    best_individual = tools.selBest(population, k=1)[0]
    other_emotion_names = [name for name in emotions if name != 'ideal_state']
    for idx, name in enumerate(other_emotion_names):
        emotions[name]['percentage'] = best_individual[idx]
    emotions['ideal_state']['percentage'] = best_individual[-1]
# Function to get emotional response
def get_emotional_response(context):
    # Normalize context
    context = normalize_context(context)

    # Encode the context and predict the emotion using the neural network
    context_encoded = encoder.transform([[context]]).toarray()
    prediction = model.predict(context_encoded)
    predicted_emotion = emotion_classes[prediction[0]]

    # Check for anomalies using the Isolation Forest
    anomaly_score = isolation_forest.decision_function(prediction.reshape(1, -1))[0]
    if anomaly_score < -0.5:
        print("Anomalous context detected. Adjusting emotional response.")
        update_emotion('calmness', 20)
    else:
        # Map each predicted emotion to the emotional states it reinforces
        emotion_updates = {
            'joy': ['joy', 'pleasure'],
            'sadness': ['sadness', 'grief'],
            'anger': ['anger'],
            'determination': ['determination'],
            'resentment': ['resentment'],
            'glory': ['glory'],
            'motivation': ['motivation'],
            'surprise': ['surprise'],
            'fear': ['fear'],
            'trust': ['trust'],
            'disgust': ['disgust'],
            'optimism': ['optimism'],
            'pessimism': ['pessimism'],
            'boredom': ['boredom'],
            'envy': ['envy'],
        }
        # Unrecognized emotions fall back to calmness
        for name in emotion_updates.get(predicted_emotion, ['calmness']):
            update_emotion(name, 20)

    # Record the current emotional state in history
    emotion_state = {name: state['percentage'] for name, state in emotions.items()}
    emotion_history.append(emotion_state)

    # Save the history to file
    save_historical_data(emotion_history)

    # Report the current emotional state
    response = ""
    for name, state in emotions.items():
        response += f"{name.capitalize()}: {state['percentage']:.2f}% ({state['motivation']} motivation)\n"
    return response
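# Illustrative example (exact numbers depend on the current state and the
# anomaly check): get_emotional_response("I am happy") should classify the
# context as 'joy', boost the related states, append the new distribution to
# emotion_history.json, and return one line per emotion such as
# "Joy: 30.00% (positive motivation)".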
# Function to handle idle state using the genetic algorithm
def handle_idle_state():
    evolve_emotions()
    response = "Emotions evolved\n"
    for name, state in emotions.items():
        response += f"{name.capitalize()}: {state['percentage']:.2f}% ({state['motivation']} motivation)\n"
    return response
# S.O.U.L. (Self-Organizing Universal Learning) Function
class SOUL:
    def __init__(self, model_name='bigscience/bloom-1b1'):
        self.tokenizer = BloomTokenizerFast.from_pretrained(model_name)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Use float16 only on GPU; half-precision generation is not reliably supported on CPU
        dtype = torch.float16 if self.device.type == "cuda" else torch.float32
        self.model = BloomForCausalLM.from_pretrained(model_name, torch_dtype=dtype)
        self.model.to(self.device)

    def generate_text(self, prompt, max_length=100):
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)

        # Generate
        with torch.no_grad():
            generate_ids = self.model.generate(
                inputs.input_ids,
                attention_mask=inputs.attention_mask,
                max_length=max_length,
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.7
            )
        return self.tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

    def bridge_ai(self, prompt):
        # Generate the response using BLOOM
        bloom_response = self.generate_text(prompt)
        # Get the emotional response to the generated text
        emotional_response = get_emotional_response(bloom_response)
        return bloom_response, emotional_response
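# Note (descriptive): bridge_ai analyzes the BLOOM completion rather than the
# user's original prompt, so the emotional summary reflects the generated text.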
# Example usage of the S.O.U.L. function
soul = SOUL()

def interact_with_soul(user_input):
    bloom_response, emotional_response = soul.bridge_ai(user_input)
    return bloom_response, emotional_response
# Function to build and launch the Gradio interface
def launch_gradio():
    iface = gr.Interface(
        fn=interact_with_soul,
        inputs="text",
        outputs=["text", "text"],
        title="S.O.U.L AI",
        description="Enter a prompt to interact with the S.O.U.L AI, which will generate a response and provide an emotional analysis."
    )
    iface.launch()

# Launch the Gradio interface in a separate process
if __name__ == '__main__':
    mp.set_start_method('spawn')
    p = mp.Process(target=launch_gradio)
    p.start()
    p.join()