# NOTE: the lines below are Hugging Face blob-page chrome captured when this
# file was scraped; they are kept (commented out) so the file parses as Python.
# Z3taACC / app.py
# TejAndrewsACC's picture
# Update app.py
# b801c95 verified
# raw
# history blame
# 8.03 kB
import gradio as gr
from huggingface_hub import InferenceClient
import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
import tensorflow as tf
import ray
from ray import tune
import pytorch_lightning as pl
import optuna
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
from sklearn.preprocessing import StandardScaler
from collections import deque
import time
import copy
def _require_env(name: str) -> str:
    """Return the stripped value of environment variable *name*.

    Raises:
        RuntimeError: if the variable is unset — a clear startup failure
        instead of the opaque ``AttributeError: 'NoneType' object has no
        attribute 'strip'`` the previous code produced.
    """
    value = os.getenv(name)
    if value is None:
        raise RuntimeError(f"Required environment variable {name!r} is not set")
    return value.strip()


# Credentials / configuration pulled from the environment (HF Space secrets).
hf_token = _require_env("HF_TOKEN")
api_key = _require_env("HF_KEY")
model_name = _require_env("Z3TAAGI_ACC")
system_prompt = _require_env("SYSTEM_PROMPT")

# Streaming chat client bound to the configured model.
client = InferenceClient(model_name)

# Eager Ray init; ignore_reinit_error makes re-execution in the same process
# (e.g. Gradio hot-reload) a no-op instead of an exception.
ray.init(ignore_reinit_error=True)
class ConsciousSupermassiveNN(pl.LightningModule):
    """Ensemble "consciousness" model: four sub-networks (feed-forward SNN
    head, RNN, CNN, FNN), a genetic-algorithm population, a bounded
    experience-replay buffer, and an Optuna-driven architecture search.

    Fixes over the previous revision:
      * ``objective_function`` referenced undefined names ``input_data`` /
        ``output_data`` (guaranteed NameError); data is now stored by
        ``neural_architecture_search`` and/or passed explicitly.
      * deprecated ``trial.suggest_loguniform`` replaced with
        ``trial.suggest_float(..., log=True)``.
      * ``optimize_model`` rebuilt its Adam optimiser on every call,
        silently discarding moment estimates; it is now cached.
      * ``run_ga``'s tuple sort compared ndarray genomes on fitness ties
        (raises); sorting is now done on fitness indices only.
      * ``self_improve`` crashed when no training data had been recorded.
    """

    def __init__(self):
        super().__init__()
        self.snn = self.create_snn()
        self.rnn = self.create_rnn()
        self.cnn = self.create_cnn()
        self.fnn = self.create_fnn()
        self.ga_population = self.initialize_ga_population()
        self.memory = {}  # last output per mode, fed back on the next call
        self.experience_replay = deque(maxlen=1000)  # bounded replay buffer
        self.model_evolution_timer = 0  # incremented by self_improve()
        self.optuna_study = None  # last completed NAS study
        # Training data for the architecture search; populated by
        # neural_architecture_search() before the Optuna objective runs.
        self.input_data = None
        self.target_data = None

    def create_snn(self):
        """Sigmoid-gated feed-forward head: 4096 -> 2048 -> 1024."""
        return nn.Sequential(
            nn.Linear(4096, 2048),
            nn.ReLU(),
            nn.Linear(2048, 1024),
            nn.Sigmoid(),
        )

    def create_rnn(self):
        """5-layer tanh RNN over 4096-dim inputs, 2048 hidden units.

        NOTE: ``nn.RNN.forward`` returns ``(output, h_n)`` — callers must
        unpack the tuple (see run_rnn and objective_function).
        """
        return nn.RNN(
            input_size=4096,
            hidden_size=2048,
            num_layers=5,
            nonlinearity="tanh",
            batch_first=True,
        )

    def create_cnn(self):
        """Three conv/pool stages, then a 1024 -> 512 projection.

        NOTE(review): the flattened size 256 * 8 * 8 fixes the expected
        input resolution (e.g. ~32x32 before the two 2x poolings) —
        confirm against the actual image size fed to run_cnn.
        """
        return nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(256 * 8 * 8, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
        )

    def create_fnn(self):
        """Plain feed-forward net: 4096 -> 2048 -> 1024 -> 512."""
        return nn.Sequential(
            nn.Linear(4096, 2048),
            nn.ReLU(),
            nn.Linear(2048, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
        )

    def initialize_ga_population(self):
        """500 random 4096-dim genomes drawn from a standard normal."""
        return [np.random.randn(4096) for _ in range(500)]

    def run_snn(self, x):
        """Forward array-like *x* through the feed-forward SNN head."""
        return self.snn(torch.tensor(x, dtype=torch.float32))

    def run_rnn(self, x):
        """Forward *x* through the RNN from a zero hidden state; returns the
        per-step outputs only (final hidden state discarded)."""
        h0 = torch.zeros(5, x.size(0), 2048)
        output, _ = self.rnn(torch.tensor(x, dtype=torch.float32), h0)
        return output

    def run_cnn(self, x):
        """Forward a single 2-D array *x* through the CNN (batch and channel
        dimensions are prepended)."""
        input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0)
        return self.cnn(input_tensor)

    def run_fnn(self, x):
        """Forward array-like *x* through the plain feed-forward net."""
        return self.fnn(torch.tensor(x, dtype=torch.float32))

    def run_ga(self, fitness_func):
        """Evolve the GA population for 200 generations.

        Each generation keeps the fittest half and refills the population
        with Gaussian-mutated copies of those survivors; returns the single
        fittest genome at the end.
        """
        for _ in range(200):
            fitness_scores = [fitness_func(ind) for ind in self.ga_population]
            # Sort by fitness via indices: comparing (score, ndarray) tuples
            # (as before) raised on score ties because ndarray comparison is
            # ambiguous.
            order = sorted(range(len(fitness_scores)),
                           key=fitness_scores.__getitem__, reverse=True)
            ranked = [self.ga_population[i] for i in order]
            self.ga_population = ranked[:250] + [
                ranked[i] + 0.1 * np.random.randn(4096) for i in range(250)
            ]
        return max(self.ga_population, key=fitness_func)

    def consciousness_loop(self, input_data, mode="snn"):
        """Run one feedback iteration through the sub-net named by *mode*.

        The previous output stored for this mode (if any) is concatenated
        onto the input before the forward pass; the new output is stored
        for the next iteration.

        NOTE(review): the feedback concatenation grows the input's last
        dimension, which the fixed-size sub-nets cannot accept on a second
        call with the same mode — confirm intended usage upstream.

        Raises:
            ValueError: if *mode* is not one of "snn", "rnn", "cnn", "fnn".
        """
        feedback = self.memory.get(mode, None)
        if feedback is not None:
            input_data = np.concatenate((input_data, feedback), axis=-1)
        runners = {
            "snn": self.run_snn,
            "rnn": self.run_rnn,
            "cnn": self.run_cnn,
            "fnn": self.run_fnn,
        }
        if mode not in runners:
            raise ValueError("Invalid mode")
        output = runners[mode](input_data)
        self.memory[mode] = output.detach().numpy()
        return output

    def neural_architecture_search(self, input_data, output_data):
        """Run a 100-trial Optuna study minimising validation loss.

        The data is stored on the instance so objective_function can reach
        it (the previous revision ignored these arguments and crashed on
        undefined names). The completed study is kept in self.optuna_study.
        """
        self.input_data = input_data
        self.target_data = output_data
        study = optuna.create_study(direction="minimize")
        study.optimize(self.objective_function, n_trials=100)
        self.optuna_study = study
        return study.best_trial

    def objective_function(self, trial, input_data=None, output_data=None):
        """Optuna objective: build a candidate model, train it for 10 epochs,
        and return the summed validation MSE.

        Data defaults to the arrays stored by neural_architecture_search
        (backward compatible with the old one-argument call shape, which
        crashed on undefined names).

        Raises:
            ValueError: if no training data is available.
        """
        input_data = self.input_data if input_data is None else input_data
        output_data = self.target_data if output_data is None else output_data
        if input_data is None or output_data is None:
            raise ValueError("No training data available for objective_function")
        model = self.create_model(trial)
        criterion = nn.MSELoss()
        # suggest_float(log=True) supersedes the deprecated suggest_loguniform.
        lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
        optimizer = optim.Adam(model.parameters(), lr=lr)
        x_train, x_val, y_train, y_val = train_test_split(input_data, output_data, test_size=0.2)
        train_dataset = TensorDataset(
            torch.tensor(x_train, dtype=torch.float32),
            torch.tensor(y_train, dtype=torch.float32),
        )
        val_dataset = TensorDataset(
            torch.tensor(x_val, dtype=torch.float32),
            torch.tensor(y_val, dtype=torch.float32),
        )
        train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=32)
        for _ in range(10):
            model.train()
            for data, targets in train_loader:
                optimizer.zero_grad()
                output = model(data)
                if isinstance(output, tuple):  # nn.RNN yields (output, h_n)
                    output = output[0]
                loss = criterion(output, targets)
                loss.backward()
                optimizer.step()
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for data, targets in val_loader:
                output = model(data)
                if isinstance(output, tuple):
                    output = output[0]
                val_loss += criterion(output, targets).item()
        return val_loss

    def create_model(self, trial):
        """Build the sub-network type selected by the trial's categorical
        "model_type" parameter."""
        builders = {
            "snn": self.create_snn,
            "rnn": self.create_rnn,
            "cnn": self.create_cnn,
            "fnn": self.create_fnn,
        }
        model_type = trial.suggest_categorical("model_type", ["snn", "rnn", "cnn", "fnn"])
        return builders[model_type]()

    def adaptive_learning_loop(self, input_data, target_data, model):
        """Take 1000 optimisation steps of *model* on a fixed (input, target)
        pair.

        NOTE(review): the loss flows through *model*, but optimize_model
        steps this LightningModule's own parameters — confirm *model* is
        (part of) self, otherwise no weights move.
        """
        for _ in range(1000):
            prediction = model(input_data)
            loss = self.compute_loss(prediction, target_data)
            self.optimize_model(loss)

    def compute_loss(self, prediction, target_data):
        """Mean-squared error between *prediction* and *target_data*."""
        return nn.MSELoss()(prediction, target_data)

    def optimize_model(self, loss):
        """Apply one Adam step on *loss* over this module's parameters.

        The optimiser is created once and cached so its running moment
        estimates survive across calls (the previous revision rebuilt it on
        every call, silently resetting state).
        """
        if getattr(self, "_optimizer", None) is None:
            self._optimizer = optim.Adam(self.parameters(), lr=0.001)
        self._optimizer.zero_grad()
        loss.backward()
        self._optimizer.step()

    def self_improve(self):
        """Every 10th call, re-run the architecture search on stored data.

        Skips the search (instead of raising AttributeError) when no data
        has been recorded yet; the best trial found is kept in self.model.
        """
        if self.model_evolution_timer % 10 == 0:
            if self.input_data is not None and self.target_data is not None:
                self.model = self.neural_architecture_search(self.input_data, self.target_data)
        self.model_evolution_timer += 1
# Module-level singleton built at import time: constructs all four sub-nets
# and the 500-member GA population up front.
supermassive_nn = ConsciousSupermassiveNN()
def respond(message, history, max_tokens, temperature, top_p):
    """Stream a chat completion for *message* given tuple-format *history*.

    Args:
        message: the new user turn.
        history: list of (user, assistant) pairs from the Gradio chat.
        max_tokens / temperature / top_p: generation controls from the UI.

    Yields:
        The accumulated response text after each streamed token (Gradio's
        streaming convention).
    """
    messages = [{"role": "system", "content": system_prompt}]
    for turn in history:
        if turn[0]:
            messages.append({"role": "user", "content": turn[0]})
        if turn[1]:
            messages.append({"role": "assistant", "content": turn[1]})
    messages.append({"role": "user", "content": message})
    response = ""
    # Loop variable renamed: the original reused `message`, shadowing the
    # user-message parameter mid-function.
    for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p):
        token = chunk.choices[0].delta.content
        # delta.content can be None on the final streamed chunk; the original
        # crashed with TypeError concatenating it.
        if token:
            response += token
            yield response
# Chat UI wiring: `respond` streams tokens; three sliders expose the
# generation knobs it receives as extra positional arguments.
generation_controls = [
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Maximum Response Length"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Creativity"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Neural Activity"),
]
demo = gr.ChatInterface(respond, additional_inputs=generation_controls, theme="glass")
if __name__ == "__main__":
    # share=True additionally publishes a temporary public Gradio link
    # alongside the local server.
    demo.launch(share=True)