import torch
import torch.nn as nn
import random
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import pickle
import numpy as np
import torch.nn.functional as F
import gradio as gr
# ---- Constants and Setup ----
model_name = 'gpt2'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
model.eval()
# Ensure the tokenizer has a pad token (GPT-2 defines none by default)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
tokenizer.clean_up_tokenization_spaces = True

# Set device for model and tensors
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# ---- Memory Management ----
session_memory = []

def save_memory(memory, filename='chat_memory.pkl'):
    with open(filename, 'wb') as f:
        pickle.dump(memory, f)

def load_memory(filename='chat_memory.pkl'):
    try:
        with open(filename, 'rb') as f:
            return pickle.load(f)
    except (FileNotFoundError, EOFError):
        return []  # Return an empty list if the file is empty or doesn't exist

session_memory = load_memory()
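# Round-trip sketch (illustrative, not executed): save_memory([{"input": "hi"}])
# writes chat_memory.pkl, and load_memory() returns [{"input": "hi"}] on the
# next start, so the chat history survives restarts.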
# ---- Response Generation ----
def generate_response(prompt, max_length=512):
    inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
    input_ids = inputs['input_ids'].to(device)
    attention_mask = inputs['attention_mask'].to(device)
    pad_token_id = tokenizer.pad_token_id

    with torch.no_grad():
        output = model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_length=max_length,  # Total length, counting the prompt tokens
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            temperature=0.9,
            top_k=50,
            top_p=0.95,
            pad_token_id=pad_token_id
        )

    response = tokenizer.decode(output[0], skip_special_tokens=True)

    # Split the response at the first newline; the second part is treated
    # as Gertrude's "inner thoughts" and gets the signature prefix
    parts = response.split("\n", 1)
    if len(parts) > 1:
        before_indent = parts[0].strip()
        after_indent = "vß Gertrude" + parts[1].strip()
        final_response = before_indent + '\n' + after_indent
    else:
        final_response = response.strip()

    return final_response
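# Example call (illustrative; the prompt format matches advanced_agi_chat below):
#   print(generate_response("User: Hello Gertrude!\nResponse:"))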
# ---- Interactive Chat Function ----
def advanced_agi_chat(user_input):
    session_memory.append({"input": user_input})
    save_memory(session_memory)

    # Generate the response based on the prompt
    prompt = f"User: {user_input}\nResponse:"
    response = generate_response(prompt)
    return response

# ---- Gradio Interface ----
def chat_interface(user_input):
    return advanced_agi_chat(user_input)
# ---- RNN Model ----
class RNNModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNNModel, self).__init__()
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        out, hidden = self.rnn(x, hidden)
        out = self.fc(out[:, -1, :])  # Use last time-step
        return out, hidden

    def init_hidden(self, batch_size):
        # nn.RNN expects a hidden state of shape (num_layers, batch, hidden_size)
        return torch.zeros(1, batch_size, self.hidden_size).to(device)
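# Shape sketch (illustrative values; this class is not used by the app):
# a batch of 4 sequences, each 10 steps of 8 features, maps to 2 logits:
#   rnn = RNNModel(input_size=8, hidden_size=32, output_size=2).to(device)
#   h0 = rnn.init_hidden(batch_size=4)                       # (1, 4, 32)
#   logits, h1 = rnn(torch.randn(4, 10, 8).to(device), h0)   # logits: (4, 2)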
# ---- CNN Model ----
class CNNModel(nn.Module):
    def __init__(self, input_channels, output_size):
        super(CNNModel, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, 16, 3)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.fc = nn.Linear(32 * 5 * 5, output_size)  # Assumes 28x28 input; see the shape walkthrough below

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(x.size(0), -1)  # Flatten
        x = self.fc(x)
        return x
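# Shape walkthrough for a 28x28 input (why the fc layer sees 32 * 5 * 5):
#   conv1 (3x3, no padding): 28 -> 26;  max_pool2d(2): 26 -> 13
#   conv2 (3x3, no padding): 13 -> 11;  max_pool2d(2): 11 -> 5
#   flatten: 32 channels * 5 * 5 = 800 features
# Illustrative call (not executed by the app):
#   cnn = CNNModel(input_channels=1, output_size=10).to(device)
#   out = cnn(torch.randn(4, 1, 28, 28).to(device))  # out: (4, 10)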
# ---- Neural Network (Feedforward) ----
class NNModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(NNModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# ---- PHI Model ----
class PHIModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(PHIModel, self).__init__()
        self.phi = (1 + np.sqrt(5)) / 2  # Golden Ratio
        self.fc1 = nn.Linear(input_size, int(input_size * self.phi))
        self.fc2 = nn.Linear(int(input_size * self.phi), output_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
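# Example (illustrative): with input_size=64 the hidden width is
# int(64 * 1.618...) = 103, so the layer sizes run 64 -> 103 -> output_size:
#   phi_net = PHIModel(input_size=64, output_size=10).to(device)
#   out = phi_net(torch.randn(4, 64).to(device))  # out: (4, 10)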
# ---- Genetic Algorithm (GA) ----
def ga_optimization(population, generations, mutation_rate):
    def fitness_function(individual):
        return sum(individual)  # Simple fitness: sum of individual genes

    for gen in range(generations):
        population.sort(key=fitness_function, reverse=True)  # Sort by fitness
        next_generation = population[:len(population) // 2]  # Keep top half

        # Crossover: create new individuals by combining genes of two survivors
        num_parents = len(next_generation)
        for i in range(num_parents):
            parent1 = next_generation[i]
            parent2 = next_generation[(i + 1) % num_parents]  # Wrap around so indexing stays inside the surviving half
            crossover_point = random.randint(1, len(parent1) - 1)
            child = parent1[:crossover_point] + parent2[crossover_point:]
            next_generation.append(child)

        # Mutation: randomly mutate genes
        for individual in next_generation:
            if random.random() < mutation_rate:
                mutation_point = random.randint(0, len(individual) - 1)
                individual[mutation_point] = random.randint(0, 1)

        population = next_generation  # Update population

    return max(population, key=fitness_function)  # Return the best individual
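# Example usage (illustrative; not called by the app): evolve 8-gene binary
# individuals toward all ones, where fitness is the sum of genes:
#   pop = [[random.randint(0, 1) for _ in range(8)] for _ in range(10)]
#   best = ga_optimization(pop, generations=20, mutation_rate=0.1)
#   print(best, sum(best))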
# ---- Gradio App Setup ----
auth = ("Tej", "186281mps", "ACC", "HIPE")  # Currently unused; app.launch(auth=...) expects (username, password) pairs
with gr.Blocks() as app:
    gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")

    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(label="🎙️What will you say to Gertrude?🎙️", placeholder="⌨️Type something here...")
            submit_button = gr.Button("💬Send💬")
        with gr.Column(scale=1):
            chatbot = gr.Textbox(label="🤖Gertrude's Response:", interactive=False)  # Textbox used for output

    # Custom styling for the UI
    gr.HTML("""
    <style>
        .gradio-container {
            background-color: #B3D9FF;
            padding: 20px;
            border-radius: 15px;
            font-family: 'Comic Sans MS';
        }
        .gradio-row {
            display: flex;
            justify-content: space-between;
        }
    </style>
    """)
    # Wire the submit button to the chat function
    submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)

# Launch the Gradio app
app.launch()