Update pages/17_RNN.py
pages/17_RNN.py CHANGED (+88 -196)
@@ -1,205 +1,97 @@
 import streamlit as st
 import torch
 import torch.nn as nn
-import torch.optim as optim
-from torchtext.data.utils import get_tokenizer
-from torchtext.vocab import build_vocab_from_iterator
-from torchtext.datasets import IMDB
-from torch.utils.data import DataLoader, random_split
-import matplotlib.pyplot as plt
-import seaborn as sns
-import pandas as pd
 import numpy as np
-from collections import Counter
-from torch.nn.utils.rnn import pad_sequence

-# Define the …
+# Define the dataset
+sequence = "hellohellohello"
+chars = list(set(sequence))
+data_size, vocab_size = len(sequence), len(chars)
+
+# Create mappings from characters to indices and vice versa
+char_to_idx = {ch: i for i, ch in enumerate(chars)}
+idx_to_char = {i: ch for i, ch in enumerate(chars)}
+
+# Convert the sequence to indices
+indices = np.array([char_to_idx[ch] for ch in sequence])
+
 class RNN(nn.Module):
-    def __init__(self, …
+    def __init__(self, input_size, hidden_size, output_size):
         super(RNN, self).__init__()
-        self.…
-        self.…
-        self.…
-        self.…
-
-    def forward(self, …
-        … (old lines 26–29 are not rendered in the diff view)
-        return …
-
-… (old lines 31–84 are not rendered in the diff view)
-    for epoch in range(epochs):
-        epoch_loss = 0
-        net.train()
-        for texts, labels, _ in iterator:
-            texts, labels = texts.to(device), labels.to(device)
-            optimizer.zero_grad()
-            predictions = net(texts).squeeze(1)
-            loss = criterion(predictions, labels)
-            loss.backward()
-            optimizer.step()
-            epoch_loss += loss.item()
-        epoch_loss /= len(iterator)
-        loss_values.append(epoch_loss)
-        st.write(f'Epoch {epoch + 1}: loss {epoch_loss:.3f}')
-    st.write('Finished Training')
-    return loss_values
-
-# Function to evaluate the network
-def evaluate_network(net, iterator, criterion):
-    epoch_loss = 0
-    correct = 0
-    total = 0
-    all_labels = []
-    all_predictions = []
-    net.eval()
-    with torch.no_grad():
-        for texts, labels, _ in iterator:
-            texts, labels = texts.to(device), labels.to(device)
-            predictions = net(texts).squeeze(1)
-            loss = criterion(predictions, labels)
-            epoch_loss += loss.item()
-            rounded_preds = torch.round(torch.sigmoid(predictions))
-            correct += (rounded_preds == labels).sum().item()
-            total += len(labels)
-            all_labels.extend(labels.cpu().numpy())
-            all_predictions.extend(rounded_preds.cpu().numpy())
-    accuracy = 100 * correct / total
-    st.write(f'Loss: {epoch_loss / len(iterator):.4f}, Accuracy: {accuracy:.2f}%')
-    return accuracy, all_labels, all_predictions
-
-# Load the data
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-# Display a loading message with some vertical space
-st.markdown("<div style='margin-top: 50px;'><b>Loading data...</b></div>", unsafe_allow_html=True)
-vocab, train_loader, valid_loader, test_loader = load_data()
+        self.hidden_size = hidden_size
+        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
+        self.i2o = nn.Linear(input_size + hidden_size, output_size)
+        self.softmax = nn.LogSoftmax(dim=1)
+
+    def forward(self, input, hidden):
+        combined = torch.cat((input, hidden), 1)
+        hidden = self.i2h(combined)
+        output = self.i2o(combined)
+        output = self.softmax(output)
+        return output, hidden
+
+    def init_hidden(self):
+        return torch.zeros(1, self.hidden_size)
+
+# Hyperparameters
+n_hidden = 128
+learning_rate = 0.005
+n_epochs = 500
+
+# Initialize the model, loss function, and optimizer
+rnn = RNN(vocab_size, n_hidden, vocab_size)
+criterion = nn.NLLLoss()
+optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
+
+def char_tensor(char):
+    tensor = torch.zeros(1, vocab_size)
+    tensor[0][char_to_idx[char]] = 1
+    return tensor
+
+# Training loop
+for epoch in range(n_epochs):
+    hidden = rnn.init_hidden()
+    rnn.zero_grad()
+    loss = 0
+
+    for i in range(data_size - 1):
+        input_char = char_tensor(sequence[i])
+        target_char = torch.tensor([char_to_idx[sequence[i + 1]]], dtype=torch.long)
+
+        output, hidden = rnn(input_char, hidden)
+        loss += criterion(output, target_char)
+
+    loss.backward()
+    optimizer.step()
+
+    if epoch % 10 == 0:
+        print(f'Epoch {epoch} loss: {loss.item() / (data_size - 1)}')
+
+print("Training complete.")
+
+def generate(start_char, predict_len=100):
+    hidden = rnn.init_hidden()
+    input_char = char_tensor(start_char)
+    predicted_str = start_char
+
+    for _ in range(predict_len):
+        output, hidden = rnn(input_char, hidden)
+        topv, topi = output.topk(1)
+        predicted_char_idx = topi[0][0].item()
+        predicted_char = idx_to_char[predicted_char_idx]
+        predicted_str += predicted_char
+        input_char = char_tensor(predicted_char)
+
+    return predicted_str

 # Streamlit interface
-st.title(…
-… (old lines 134–142 are not rendered in the diff view)
-n_layers = st.sidebar.slider('Number of RNN Layers', 1, 3, 2)
-dropout = st.sidebar.slider('Dropout', 0.0, 0.5, 0.2, step=0.1)
-learning_rate = st.sidebar.slider('Learning Rate', 0.001, 0.1, 0.01, step=0.001)
-epochs = st.sidebar.slider('Epochs', 1, 20, 5)
-
-# Create the network
-vocab_size = len(vocab)
-output_size = 1
-net = RNN(vocab_size, embed_size, hidden_size, output_size, n_layers, dropout).to(device)
-criterion = nn.BCEWithLogitsLoss()
-optimizer = optim.Adam(net.parameters(), lr=learning_rate)
-
-# Add vertical space
-st.write('\n' * 10)
-
-# Train the network
-if st.sidebar.button('Train Network'):
-    loss_values = train_network(net, train_loader, optimizer, criterion, epochs)
-
-    # Plot the loss values
-    plt.figure(figsize=(10, 5))
-    plt.plot(range(1, epochs + 1), loss_values, marker='o')
-    plt.title('Training Loss Over Epochs')
-    plt.xlabel('Epoch')
-    plt.ylabel('Loss')
-    plt.grid(True)
-    st.pyplot(plt)
-
-    # Store the trained model in the session state
-    st.session_state['trained_model'] = net
-
-# Test the network
-if 'trained_model' in st.session_state and st.sidebar.button('Test Network'):
-    accuracy, all_labels, all_predictions = evaluate_network(st.session_state['trained_model'], test_loader, criterion)
-    st.write(f'Test Accuracy: {accuracy:.2f}%')
-
-    # Display results in a table
-    st.write('Ground Truth vs Predicted')
-    results = pd.DataFrame({
-        'Ground Truth': all_labels,
-        'Predicted': all_predictions
-    })
-    st.table(results.head(50))  # Display first 50 results for brevity
-
-# Visualize some test results
-def visualize_text_predictions(iterator, net):
-    net.eval()
-    samples = []
-    with torch.no_grad():
-        for texts, labels, _ in iterator:
-            predictions = torch.round(torch.sigmoid(net(texts).squeeze(1)))
-            samples.extend(zip(texts.cpu(), labels.cpu(), predictions.cpu()))
-            if len(samples) >= 10:
-                break
-    return samples[:10]
-
-if 'trained_model' in st.session_state and st.sidebar.button('Show Test Results'):
-    samples = visualize_text_predictions(test_loader, st.session_state['trained_model'])
-    st.write('Ground Truth vs Predicted for Sample Texts')
-    for i, (text, true_label, predicted) in enumerate(samples):
-        st.write(f'Sample {i+1}')
-        st.text(' '.join([vocab.get_itos()[token] for token in text]))
-        st.write(f'Ground Truth: {true_label.item()}, Predicted: {predicted.item()}')
+st.title('RNN Character Prediction')
+st.write('This app uses a Recurrent Neural Network (RNN) to predict the next character in a given string.')
+
+start_char = st.text_input('Enter a starting character:', 'h')
+predict_len = st.slider('Select the length of the generated text:', min_value=10, max_value=200, value=50)
+
+if st.button('Generate Text'):
+    generated_text = generate(start_char, predict_len)
+    st.write('Generated Text:')
+    st.text(generated_text)
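
The mechanics of the new cell are easy to check by hand. The sketch below is not part of the commit; the names i2h and i2o merely mirror the ones in the class. It runs a single forward step: a one-hot input is concatenated with the hidden state, one linear layer produces the next hidden state, and a second linear layer plus log-softmax produces log-probabilities over the vocabulary.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Standalone sketch of one RNN step; vocab_size=4 matches "hello" (h, e, l, o)
vocab_size, n_hidden = 4, 128
i2h = nn.Linear(vocab_size + n_hidden, n_hidden)    # produces the next hidden state
i2o = nn.Linear(vocab_size + n_hidden, vocab_size)  # produces next-character scores

x = torch.zeros(1, vocab_size)
x[0, 0] = 1                          # one-hot encoding of a single character
h = torch.zeros(1, n_hidden)         # initial hidden state, as in init_hidden()

combined = torch.cat((x, h), dim=1)  # shape (1, vocab_size + n_hidden)
h_next = i2h(combined)               # shape (1, n_hidden)
log_probs = F.log_softmax(i2o(combined), dim=1)

print(log_probs.shape)        # torch.Size([1, 4])
print(log_probs.exp().sum())  # ~1.0: a proper distribution over the 4 characters

Note the design choice: rather than using nn.RNN, the commit writes the recurrence out explicitly, which is why the hidden state has to be threaded through the loop by hand in both the training loop and generate().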
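One caveat for anyone running the new page: char_to_idx contains only the characters of "hellohellohello", so a start character outside h, e, l, o raises a KeyError inside char_tensor, and a multi-character input fails the same way. A hedged sketch of a guard for the button handler, reusing the file's own names (generate, char_to_idx, chars, start_char, predict_len); it is not part of the commit:

# Sketch only: validate the user's input before calling generate();
# assumes the names defined in pages/17_RNN.py are in scope.
if st.button('Generate Text'):
    if start_char and start_char[0] in char_to_idx:
        st.write('Generated Text:')
        st.text(generate(start_char[0], predict_len))
    else:
        st.error(f"Please start with one of: {', '.join(sorted(chars))}")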