import torch
import torch.nn as nn


class BiLSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(BiLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, bidirectional=True)
        # Multiply by 2 because the bidirectional LSTM concatenates forward and backward states
        self.fc = nn.Linear(hidden_size * 2, output_size)

    def forward(self, x):
        # Initial hidden and cell states: num_layers * 2 because of bidirectionality
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.lstm(x, (h0, c0))
        # Take the output at the last time step and project it to output_size
        out = self.fc(out[:, -1, :])
        return out


# device = 'cpu'
# model = BiLSTM().to(device)
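
# A minimal usage sketch; the hyperparameters and tensor sizes below
# (input_size=10, hidden_size=64, num_layers=2, output_size=3,
# batch of 8 sequences of length 20) are illustrative assumptions only.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = BiLSTM(input_size=10, hidden_size=64, num_layers=2, output_size=3).to(device)
x = torch.randn(8, 20, 10).to(device)  # (batch, seq_len, input_size) because batch_first=True
logits = model(x)
print(logits.shape)  # torch.Size([8, 3])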