TejAndrewsACC committed on
Commit 694626a · verified · 1 Parent(s): 146e9a6

Update app.py

Files changed (1)
  1. app.py +67 -184
app.py CHANGED
@@ -1,190 +1,40 @@
- import torch
- import torch.nn as nn
- import random
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
- import pickle
- import numpy as np
- import torch.nn.functional as F
- from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
- import gradio as gr
-
- # ---- Constants and Setup ----
- model_name = 'gpt2'
- tokenizer = GPT2Tokenizer.from_pretrained(model_name)
- model = GPT2LMHeadModel.from_pretrained(model_name)
- model.eval()
-
- # Ensure tokenizer pad token is set
- if tokenizer.pad_token is None:
-     tokenizer.pad_token = tokenizer.eos_token
-
- tokenizer.clean_up_tokenization_spaces = True
-
- # Set device for model and tensors
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- model.to(device)
-
- # ---- Memory Management ----
- session_memory = []
-
- def save_memory(memory, filename='chat_memory.pkl'):
-     with open(filename, 'wb') as f:
-         pickle.dump(memory, f)
-
- def load_memory(filename='chat_memory.pkl'):
-     try:
-         with open(filename, 'rb') as f:
-             return pickle.load(f)
-     except (FileNotFoundError, EOFError):
-         return [] # Return an empty list if the file is empty or doesn't exist
-
-
- session_memory = load_memory()
-
- # ---- Response Generation ----
- def generate_response(prompt, max_length=512):
-     inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
-     input_ids = inputs['input_ids'].to(device)
-     attention_mask = inputs['attention_mask'].to(device)
-     pad_token_id = tokenizer.pad_token_id
-
-     with torch.no_grad():
-         output = model.generate(
-             input_ids,
-             attention_mask=attention_mask,
-             max_length=max_length,
-             num_return_sequences=1,
-             no_repeat_ngram_size=2,
-             do_sample=True,
-             temperature=0.9,
-             top_k=50,
-             top_p=0.95,
-             early_stopping=False,
-             pad_token_id=pad_token_id
-         )
-
-     response = tokenizer.decode(output[0], skip_special_tokens=True)
-
-     # Split response into two parts, where the second indent is considered the "inner thoughts"
-     parts = response.split("\n", 1)
-     if len(parts) > 1:
-         before_indent = parts[0].strip()
-         after_indent = "vß Gertrude" + parts[1].strip()
-         final_response = before_indent + '\n' + after_indent
-     else:
-         final_response = response.strip()
-
-     return final_response
-
- # ---- Interactive Chat Function ----
- def advanced_agi_chat(user_input):
-     session_memory.append({"input": user_input})
-     save_memory(session_memory)
-
-     # Generate the response based on the prompt
-     prompt = f"User: {user_input}\nResponse:"
-     response = generate_response(prompt)
-
-     return response
-
- # ---- Gradio Interface ----
- def chat_interface(user_input):
-     response = advanced_agi_chat(user_input)
-     return response
-
- # ---- RNN Model ----
- class RNNModel(nn.Module):
-     def __init__(self, input_size, hidden_size, output_size):
-         super(RNNModel, self).__init__()
-         self.hidden_size = hidden_size
-         self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
-         self.fc = nn.Linear(hidden_size, output_size)
-
-     def forward(self, x, hidden):
-         out, hidden = self.rnn(x, hidden)
-         out = self.fc(out[:, -1, :]) # Use last time-step
-         return out, hidden
-
-     def init_hidden(self, batch_size):
-         return torch.zeros(batch_size, self.hidden_size).to(device)
-
- # ---- CNN Model ----
- class CNNModel(nn.Module):
-     def __init__(self, input_channels, output_size):
-         super(CNNModel, self).__init__()
-         self.conv1 = nn.Conv2d(input_channels, 16, 3)
-         self.conv2 = nn.Conv2d(16, 32, 3)
-         self.fc = nn.Linear(32 * 6 * 6, output_size) # Assume input size is 28x28
-
-     def forward(self, x):
-         x = F.relu(F.max_pool2d(self.conv1(x), 2))
-         x = F.relu(F.max_pool2d(self.conv2(x), 2))
-         x = x.view(x.size(0), -1) # Flatten
-         x = self.fc(x)
-         return x
-
- # ---- Neural Network (Feedforward) ----
- class NNModel(nn.Module):
-     def __init__(self, input_size, hidden_size, output_size):
-         super(NNModel, self).__init__()
-         self.fc1 = nn.Linear(input_size, hidden_size)
-         self.fc2 = nn.Linear(hidden_size, output_size)
-
-     def forward(self, x):
-         x = F.relu(self.fc1(x))
-         x = self.fc2(x)
-         return x
-
- # ---- PHI Model ----
- class PHIModel(nn.Module):
-     def __init__(self, input_size, output_size):
-         super(PHIModel, self).__init__()
-         self.phi = (1 + np.sqrt(5)) / 2 # Golden Ratio
-         self.fc1 = nn.Linear(input_size, int(input_size * self.phi))
-         self.fc2 = nn.Linear(int(input_size * self.phi), output_size)
-
-     def forward(self, x):
-         x = F.relu(self.fc1(x))
-         x = self.fc2(x)
-         return x
-
- # ---- Genetic Algorithm (GA) ----
- def ga_optimization(population, generations, mutation_rate):
-     def fitness_function(individual):
-         return sum(individual) # Simple fitness: sum of individual genes
-
-     for gen in range(generations):
-         population.sort(key=fitness_function, reverse=True) # Sort by fitness
-         next_generation = population[:len(population)//2] # Keep top half
-
-         # Crossover: Create new individuals by combining genes
-         for i in range(len(population) // 2):
-             parent1 = next_generation[i]
-             parent2 = next_generation[len(population)//2 + i]
-             crossover_point = random.randint(1, len(parent1) - 1)
-             child = parent1[:crossover_point] + parent2[crossover_point:]
-             next_generation.append(child)
-
-         # Mutation: Randomly mutate genes
-         for individual in next_generation:
-             if random.random() < mutation_rate:
-                 mutation_point = random.randint(0, len(individual) - 1)
-                 individual[mutation_point] = random.randint(0, 1)
-
-         population = next_generation # Update population
-
-     return population[0] # Return the best individual
-
- # ---- Gradio App Setup ----
  import gradio as gr

  # ---- Gradio App Setup ----
  auth = ("Tej", "186281mps", "ACC", "HIPE")

+ # Function to simulate the button click for Pain and Pleasure buttons
  def simulate_button_click(button_type):
-     # Just simulate the click animation and do nothing else
      return gr.update(interactive=True, elem_id="chatbot_output")

+ # Function to update AI behavior settings dynamically
+ def update_ai_settings(temp, max_length, top_p, no_repeat_ngrams, adjust_response_style):
+     # Update AI settings based on user inputs
+     model.config.temperature = temp
+     model.config.max_length = max_length
+     model.config.top_p = top_p
+     model.config.no_repeat_ngram_size = no_repeat_ngrams
+
+     # Optionally adjust the response style here (e.g., adjust based on a tone or creative behavior)
+     return f"AI Settings Updated: Temperature={temp}, Max Length={max_length}, Top P={top_p}, N-grams={no_repeat_ngrams}"
+
+ # Function to simulate theme switching
+ def update_theme(theme_color):
+     if theme_color == "Light Blue":
+         background_color = "#ADD8E6"
+         text_color = "#333333"
+     elif theme_color == "Dark Mode":
+         background_color = "#2e2e2e"
+         text_color = "#FFFFFF"
+     elif theme_color == "Purple":
+         background_color = "#9b59b6"
+         text_color = "#FFFFFF"
+     else:
+         background_color = "#FFFFFF"
+         text_color = "#333333"
+
+     return gr.update(background_color=background_color, text_color=text_color)
+
  with gr.Blocks() as app:
      # Header section
      gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")
@@ -195,8 +45,8 @@ with gr.Blocks() as app:
      user_input = gr.Textbox(
          label="What will you say to Gertrude?",
          placeholder="Type something here...",
-         lines=3, # Multiline input box for more comfortable typing
-         elem_id="user_input_box" # Custom ID for styling
+         lines=3,
+         elem_id="user_input_box"
      )
      submit_button = gr.Button("Send", elem_id="send_button")

@@ -204,8 +54,8 @@ with gr.Blocks() as app:
      chatbot = gr.Textbox(
          label="Gertrude's Response",
          interactive=False,
-         elem_id="chatbot_output", # Custom ID for styling
-         lines=8 # Display more text for longer responses
+         elem_id="chatbot_output",
+         lines=8
      )

      # Buttons for "Pain" and "Pleasure" with cool animations
@@ -215,12 +65,39 @@ with gr.Blocks() as app:
      with gr.Column(scale=1):
          pleasure_button = gr.Button("Pleasure", elem_id="pleasure_button", variant="primary", size="lg")

+     # Settings Panel: Controls for both AI and UI theme settings
+     with gr.Column(scale=1):
+         gr.Markdown("### Settings Panel")
+
+         # AI behavior settings
+         gr.Markdown("#### AI Response Settings")
+         temperature_slider = gr.Slider(minimum=0.1, maximum=1.5, value=0.9, step=0.1, label="Creativity")
+         max_length_slider = gr.Slider(minimum=50, maximum=1024, value=512, step=10, label="Max Response Length")
+         top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Mutation")
+         ngram_slider = gr.Slider(minimum=1, maximum=4, value=2, step=1, label="Neural Netwok Layers")
+
+         # UI Theme settings
+         gr.Markdown("#### UI Theme Settings")
+         theme_selector = gr.Dropdown(
+             choices=["Light Blue", "Dark Mode", "Purple", "Default"],
+             label="Select Theme",
+             value="Light Blue"
+         )
+
+         response_style_toggle = gr.Checkbox(
+             label="Adjust AI response style (e.g., creativity level)",
+             value=True
+         )
+
+         # Button to update settings
+         update_button = gr.Button("Update Settings")
+
      # Custom Styling for Buttons, Background, and Text
      gr.HTML("""
      <style>
      /* Main Container Styling */
      .gradio-container {
-         background-color: #F0F8FF;
+         background-color: #ADD8E6;
          padding: 20px;
          border-radius: 20px;
          font-family: 'Comic Sans MS';
@@ -323,5 +200,11 @@ with gr.Blocks() as app:
      pain_button.click(simulate_button_click, inputs=pain_button, outputs=chatbot)
      pleasure_button.click(simulate_button_click, inputs=pleasure_button, outputs=chatbot)

+     # Update settings and UI theme
+     update_button.click(update_ai_settings,
+                         inputs=[temperature_slider, max_length_slider, top_p_slider, ngram_slider, response_style_toggle],
+                         outputs=chatbot)
+     theme_selector.change(update_theme, inputs=theme_selector, outputs=gr.HTML(""))
+
  # Launch the Gradio app
  app.launch()
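For readers unfamiliar with the event wiring the new settings panel relies on, here is a minimal, self-contained sketch of the same Blocks pattern in isolation: slider values are passed into a callback through .click(), and the callback's return string is written into an output Textbox. It assumes a recent Gradio release, and the component and function names are illustrative, not taken from app.py.

# Minimal sketch of the .click() wiring pattern used by the settings panel.
import gradio as gr

def apply_settings(temperature, max_length):
    # Illustrative callback: echo the chosen values back to the UI.
    return f"Settings applied: temperature={temperature}, max_length={max_length}"

with gr.Blocks() as demo:
    temperature = gr.Slider(minimum=0.1, maximum=1.5, value=0.9, step=0.1, label="Temperature")
    max_length = gr.Slider(minimum=50, maximum=1024, value=512, step=10, label="Max response length")
    status = gr.Textbox(label="Status", interactive=False)
    apply_button = gr.Button("Apply settings")

    # Same pattern as update_button.click(...) in app.py: input components in, output component updated.
    apply_button.click(apply_settings, inputs=[temperature, max_length], outputs=status)

demo.launch()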