Update app.py
app.py
CHANGED
@@ -1,4 +1,3 @@
-# Import necessary libraries
 import torch
 import torch.nn as nn
 import random
@@ -53,57 +52,6 @@ def adjust_for_emotion(response, sentiment):
         return f"I'm sorry to hear that: {response}. How can I assist you further?"
     return response

-# ---- Neural Network Models ----
-# 1. RNN Model for Sentiment Classification
-class RNN(nn.Module):
-    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
-        super(RNN, self).__init__()
-        self.embedding = nn.Embedding(vocab_size, embedding_dim)
-        self.rnn = nn.RNN(embedding_dim, hidden_dim, batch_first=True)
-        self.fc = nn.Linear(hidden_dim, output_dim)
-
-    def forward(self, x):
-        embedded = self.embedding(x)
-        out, _ = self.rnn(embedded)
-        out = out[:, -1, :]  # Get the last hidden state
-        out = self.fc(out)
-        return out
-
-# 2. CNN Model for Text Classification
-class CNN(nn.Module):
-    def __init__(self, vocab_size, embedding_dim, num_filters, filter_sizes, output_dim):
-        super(CNN, self).__init__()
-        self.embedding = nn.Embedding(vocab_size, embedding_dim)
-        self.convs = nn.ModuleList([
-            nn.Conv2d(1, num_filters, (fs, embedding_dim)) for fs in filter_sizes
-        ])
-        self.fc = nn.Linear(num_filters * len(filter_sizes), output_dim)
-
-    def forward(self, x):
-        embedded = self.embedding(x).unsqueeze(1)  # Add channel dimension
-        conv_results = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
-        pooled_results = [F.max_pool1d(conv, conv.size(2)).squeeze(2) for conv in conv_results]
-        cat_results = torch.cat(pooled_results, dim=1)
-        out = self.fc(cat_results)
-        return out
-
-# 3. Simple Feed-Forward Neural Network (NN) for additional processing
-class FFNN(nn.Module):
-    def __init__(self, input_dim, hidden_dim, output_dim):
-        super(FFNN, self).__init__()
-        self.fc1 = nn.Linear(input_dim, hidden_dim)
-        self.fc2 = nn.Linear(hidden_dim, output_dim)
-
-    def forward(self, x):
-        x = F.relu(self.fc1(x))
-        x = self.fc2(x)
-        return x
-
-# Initialize models
-rnn_model = RNN(vocab_size=len(tokenizer), embedding_dim=100, hidden_dim=128, output_dim=2).to(device)
-cnn_model = CNN(vocab_size=len(tokenizer), embedding_dim=100, num_filters=64, filter_sizes=[3, 4, 5], output_dim=2).to(device)
-ffnn_model = FFNN(input_dim=100, hidden_dim=50, output_dim=1).to(device)
-
 # ---- Response Generation ----
 def generate_response(prompt, max_length=512):
     inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
@@ -128,7 +76,7 @@ def generate_response(prompt, max_length=512):

     response = tokenizer.decode(output[0], skip_special_tokens=True)

-    # Split response into two parts
+    # Split response into two parts and apply color
     parts = response.split("\n", 1)
     if len(parts) > 1:
         before_indent = f'<span style="color: orange;">{parts[0].strip()}</span>'
@@ -166,11 +114,11 @@ with gr.Blocks() as app:
     gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")

     with gr.Row():
-        with gr.Column():
+        with gr.Column(scale=1):
             user_input = gr.Textbox(label="What will you say to Gertrude?", placeholder="Type something here... Expect 1-2 Minute Response Times...")
             submit_button = gr.Button("Send")
-        with gr.Column():
-            chatbot = gr.
+        with gr.Column(scale=1):
+            chatbot = gr.Textbox(label="Gertrude's Response", interactive=False)  # This is now a Textbox for output

     # Adding custom styling for the UI
     gr.HTML("""
@@ -181,6 +129,10 @@ with gr.Blocks() as app:
         border-radius: 15px;
         font-family: 'Comic Sans MS';
     }
+    .gradio-row {
+        display: flex;
+        justify-content: space-between;
+    }
     </style>
     """)
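Note: the hunks above end before any event handlers, so the commit doesn't show how the new chatbot Textbox gets filled. A minimal sketch of the likely wiring, assuming submit_button.click passes the user input to generate_response and routes the return value into the Textbox (the stub body below is a hypothetical stand-in for the real tokenizer/model call in app.py), would be:

import gradio as gr

def generate_response(prompt, max_length=512):
    # Hypothetical stub: stands in for the real tokenizer/model call in app.py
    return f"Gertrude says: {prompt}"

with gr.Blocks() as app:
    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(label="What will you say to Gertrude?")
            submit_button = gr.Button("Send")
        with gr.Column(scale=1):
            chatbot = gr.Textbox(label="Gertrude's Response", interactive=False)
    # Wire the button: one input component in, one output component out
    submit_button.click(fn=generate_response, inputs=user_input, outputs=chatbot)

app.launch()

With interactive=False the Textbox is display-only, so writing to it through outputs= is the standard Blocks pattern for showing model responses.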