import torch
import torch.nn as nn


class RNNModel(nn.Module):
    """A single-layer RNN followed by a linear head for sequence regression."""

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # batch_first=True expects inputs of shape (batch, seq_len, input_size)
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.rnn(x)  # out: (batch, seq_len, hidden_size)
        out = out[:, -1, :]   # keep only the last time step
        out = self.fc(out)    # project to (batch, output_size)
        return out


# Hyperparameters
input_size = 10
hidden_size = 20
output_size = 1

model = RNNModel(input_size, hidden_size, output_size)

# Synthetic data: 32 sequences of length 5 with 10 features per time step
X = torch.randn(32, 5, 10)
y = torch.randn(32, 1)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Training loop: one forward/backward pass over the full synthetic batch per epoch
for epoch in range(100):
    model.train()
    optimizer.zero_grad()
    output = model(X)
    loss = criterion(output, y)
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')
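
# Inference sketch (illustrative addition, not part of the original script):
# switch to eval mode and predict on a hypothetical unseen batch with the same
# assumed shape (batch, seq_len=5, input_size=10), without tracking gradients.
model.eval()
with torch.no_grad():
    X_new = torch.randn(4, 5, 10)  # hypothetical new sequences
    preds = model(X_new)           # predictions of shape (4, 1)
    print(preds.shape)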