Update app.py
app.py
CHANGED
@@ -15,12 +15,13 @@ tokenizer = GPT2Tokenizer.from_pretrained(model_name)
 model = GPT2LMHeadModel.from_pretrained(model_name)
 model.eval()
 
+# Ensure tokenizer pad token is set
 if tokenizer.pad_token is None:
     tokenizer.pad_token = tokenizer.eos_token
 
 tokenizer.clean_up_tokenization_spaces = True
 
-#
+# Set device for model and tensors
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 model.to(device)
 
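For context on the comments added above: GPT-2 checkpoints ship without a pad token, so reusing the EOS token and passing it to generate() avoids padding problems and warnings. A minimal standalone sketch of that setup, assuming the stock "gpt2" checkpoint rather than whatever model_name resolves to in this app:

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Assumption: plain "gpt2"; the app may load a different checkpoint.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
model.eval()

# GPT-2 defines no pad token; reuse EOS so generate() can pad safely.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

inputs = tokenizer("User: hello\nAutistic-Gertrude:", return_tensors="pt").to(device)
with torch.no_grad():
    output = model.generate(**inputs, max_length=64, pad_token_id=tokenizer.pad_token_id)
print(tokenizer.decode(output[0], skip_special_tokens=True))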
@@ -53,7 +54,6 @@ def adjust_for_emotion(response, sentiment):
     return response
 
 # ---- Neural Network Models ----
-
 # 1. RNN Model for Sentiment Classification
 class RNN(nn.Module):
     def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
@@ -128,39 +128,30 @@ def generate_response(prompt, max_length=512):
 
     response = tokenizer.decode(output[0], skip_special_tokens=True)
 
-    #
+    # Split response into two parts
     parts = response.split("\n", 1)
-
     if len(parts) > 1:
-        # Text before the first indentation is orange
         before_indent = f'<span style="color: orange;">{parts[0].strip()}</span>'
-
-        # Text after the first newline or indentation gets labeled as "Inner Thoughts:" in blue
         after_indent = f'<span style="color: blue;">Inner Thoughts: {parts[1].strip()}</span>'
-
-        # Combine the two parts
         colored_response = before_indent + '\n' + after_indent
     else:
-        # If there's no newline, color the entire response orange
         colored_response = f'<span style="color: orange;">{response.strip()}</span>'
 
     return colored_response
 
-
 # ---- Interactive Chat Function ----
 def advanced_agi_chat(user_input):
-    # Add user input to session memory
     session_memory.append({"input": user_input})
     save_memory(session_memory)
 
-    # Sentiment analysis
+    # Sentiment analysis of user input
    user_sentiment = analyze_sentiment(user_input)
 
-    # Generate the response
+    # Generate the response based on the prompt
     prompt = f"User: {user_input}\nAutistic-Gertrude:"
     response = generate_response(prompt)
 
-    # Adjust response
+    # Adjust the response based on sentiment
     adjusted_response = adjust_for_emotion(response, user_sentiment)
 
     return adjusted_response
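The reworked formatting block splits the decoded text at the first newline, colors the first part orange, and labels the remainder as blue "Inner Thoughts". A small standalone illustration of just that step, using a made-up response string in place of the model output:

# Hypothetical decoded output; in the app this comes from tokenizer.decode(...)
response = "Hello there!\nI wonder what they really meant by that."

parts = response.split("\n", 1)
if len(parts) > 1:
    before_indent = f'<span style="color: orange;">{parts[0].strip()}</span>'
    after_indent = f'<span style="color: blue;">Inner Thoughts: {parts[1].strip()}</span>'
    colored_response = before_indent + '\n' + after_indent
else:
    colored_response = f'<span style="color: orange;">{response.strip()}</span>'

print(colored_response)
# <span style="color: orange;">Hello there!</span>
# <span style="color: blue;">Inner Thoughts: I wonder what they really meant by that.</span>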
@@ -168,21 +159,32 @@ def advanced_agi_chat(user_input):
 # ---- Gradio Interface ----
 def chat_interface(user_input):
     response = advanced_agi_chat(user_input)
-    # Return the response with HTML formatting
     return response
 
+# ---- Gradio App Setup ----
 with gr.Blocks() as app:
     gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")
+
     with gr.Row():
         with gr.Column():
             user_input = gr.Textbox(label="What will you say to Gertrude?", placeholder="Type something here... Expect 1-2 Minute Response Times...")
             submit_button = gr.Button("Send")
         with gr.Column():
-            chatbot = gr.HTML(label="Gertrude's Response"
-
-            #
-            gr.HTML("
-
+            chatbot = gr.HTML(label="Gertrude's Response")  # No 'interactive' argument
+
+    # Adding custom styling for the UI
+    gr.HTML("""
+    <style>
+        .gradio-container {
+            background-color: #F4F8FF;
+            padding: 20px;
+            border-radius: 15px;
+            font-family: 'Comic Sans MS';
+        }
+    </style>
+    """)
+
+    # Setting the button click event
     submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)
 
 # Launch the Gradio app
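The UI change rewrites the gr.HTML output component (the new comment notes it takes no 'interactive' argument) and injects a style block through a second gr.HTML call. A minimal runnable sketch of the same pattern, with an echo function standing in for advanced_agi_chat:

import gradio as gr

def chat_interface(user_input):
    # Stand-in for the real advanced_agi_chat pipeline.
    return f'<span style="color: orange;">You said: {user_input}</span>'

with gr.Blocks() as app:
    gr.Markdown("# Demo")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="What will you say to Gertrude?")
            submit_button = gr.Button("Send")
        with gr.Column():
            chatbot = gr.HTML(label="Gertrude's Response")

    # Inline CSS injected as an HTML fragment, mirroring the change above.
    gr.HTML("""
    <style>
        .gradio-container { background-color: #F4F8FF; padding: 20px; border-radius: 15px; }
    </style>
    """)

    submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)

app.launch()

Gradio also accepts a css argument on gr.Blocks, which is the more usual place for this kind of styling.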