Update pages/19_RNN_LSTM_Shakespeare.py
pages/19_RNN_LSTM_Shakespeare.py (CHANGED)
@@ -22,7 +22,7 @@ class LSTMModel(nn.Module):
 def generate_text(model, start_str, length, char_to_int, int_to_char, num_layers, hidden_size):
     model.eval()
     input_seq = [char_to_int[c] for c in start_str]
-    input_seq = torch.tensor(input_seq, dtype=torch.long).unsqueeze(0)
+    input_seq = torch.tensor(input_seq, dtype=torch.long).unsqueeze(0).unsqueeze(-1)
     h = (torch.zeros(num_layers, 1, hidden_size), torch.zeros(num_layers, 1, hidden_size))
     generated_text = start_str
 
@@ -31,7 +31,7 @@ def generate_text(model, start_str, length, char_to_int, int_to_char, num_layers
         _, predicted = torch.max(output, 1)
         predicted_char = int_to_char[predicted.item()]
         generated_text += predicted_char
-        input_seq = torch.tensor([[char_to_int[predicted_char]]], dtype=torch.long)
+        input_seq = torch.tensor([[char_to_int[predicted_char]]], dtype=torch.long).unsqueeze(0).unsqueeze(-1)
 
     return generated_text
 
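Note on the two shape fixes above: both add a trailing feature axis so the generation input matches what the model's LSTM expects. A minimal sketch of that shape contract, assuming the model wraps an nn.LSTM with input_size=1 and batch_first=True (consistent with the (len(dataX), seq_length, 1) reshape later in this commit); the sizes here are illustrative, not the app's:

import torch
import torch.nn as nn

# Toy sizes; only the (batch, seq_len, features) contract matters here.
num_layers, hidden_size = 2, 256
lstm = nn.LSTM(input_size=1, hidden_size=hidden_size,
               num_layers=num_layers, batch_first=True)

seq = torch.randn(100)                         # 100 encoded characters
x = seq.unsqueeze(0).unsqueeze(-1)             # (1, 100, 1): batch, seq_len, features
h0 = torch.zeros(num_layers, 1, hidden_size)   # (num_layers, batch, hidden_size)
c0 = torch.zeros(num_layers, 1, hidden_size)
out, (hn, cn) = lstm(x, (h0, c0))
print(out.shape)                               # torch.Size([1, 100, 256])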
@@ -39,7 +39,7 @@ def generate_text(model, start_str, length, char_to_int, int_to_char, num_layers
 st.title("RNN/LSTM Text Generation")
 
 # Inputs
-text_data = st.text_area("Enter your text data for training:")
+text_data = st.text_area("Enter your text data for training:", "To be, or not to be, that is the question:\nWhether 'tis nobler in the mind to suffer\nThe slings and arrows of outrageous fortune,\nOr to take arms against a sea of troubles\nAnd by opposing end them. To die: to sleep;\nNo more; and by a sleep to say we end\nThe heart-ache and the thousand natural shocks\nThat flesh is heir to, 'tis a consummation\nDevoutly to be wish'd. To die, to sleep;\nTo sleep: perchance to dream: ay, there's the rub;\nFor in that sleep of death what dreams may come\nWhen we have shuffled off this mortal coil,\nMust give us pause: there's the respect\nThat makes calamity of so long life;")
 start_string = st.text_input("Enter the start string for text generation:")
 seq_length = st.number_input("Sequence length:", min_value=10, value=100)
 hidden_size = st.number_input("Hidden size:", min_value=50, value=256)
@@ -70,7 +70,7 @@ if st.button("Train and Generate"):
     if len(dataX) == 0:
         st.error("Not enough data to create input-output pairs. Please provide more text data.")
     else:
-        X = np.reshape(dataX, (len(dataX), seq_length))
+        X = np.reshape(dataX, (len(dataX), seq_length, 1))
         X = X / float(len(chars))
         Y = np.array(dataY)
 
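The same trailing axis is added on the training side: reshaping to (len(dataX), seq_length, 1) turns each window into seq_length steps of one feature each. A small sketch, assuming dataX holds integer-encoded windows of length seq_length as in the app's data preparation (the sizes and vocabulary are made up):

import numpy as np

seq_length = 10
dataX = [list(range(i, i + seq_length)) for i in range(5)]  # 5 toy windows

X = np.reshape(dataX, (len(dataX), seq_length, 1))  # add the feature axis
X = X / float(60)                                   # normalize by vocab size; 60 is arbitrary
print(X.shape)                                      # (5, 10, 1)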
@@ -87,15 +87,15 @@ if st.button("Train and Generate"):
 
         # Training the model
         for epoch in range(num_epochs):
-            h = (torch.zeros(num_layers,
+            h = (torch.zeros(num_layers, X_tensor.size(0), hidden_size), torch.zeros(num_layers, X_tensor.size(0), hidden_size))
             epoch_loss = 0
             for i in range(len(dataX)):
-                inputs = X_tensor[i].unsqueeze(0)
+                inputs = X_tensor[i].unsqueeze(0)
                 targets = Y_tensor[i].unsqueeze(0)
 
                 # Forward pass
-                outputs, h = model(inputs,
-                h = (h[0].detach()
+                outputs, h = model(inputs, h)
+                h = (h[0].detach(), h[1].detach())
                 loss = criterion(outputs, targets)
 
                 # Backward pass and optimization
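The completed detach line above is the standard truncated backpropagation-through-time pattern: the hidden state is carried between iterations as plain values, so each backward() stops at the current step rather than trying to traverse graphs that were already freed. A self-contained sketch of the pattern (toy model, not the app's):

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=1, hidden_size=8, batch_first=True)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(lstm.parameters())

h = (torch.zeros(1, 1, 8), torch.zeros(1, 1, 8))
for step in range(3):
    x = torch.randn(1, 5, 1)
    out, h = lstm(x, h)
    h = (h[0].detach(), h[1].detach())  # keep the values, cut the graph
    loss = criterion(out, torch.zeros_like(out))
    optimizer.zero_grad()
    loss.backward()  # without the detach, step 2 raises "backward through the graph a second time"
    optimizer.step()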