import numpy as np
import pandas as pd
import torch
import torch.nn as nn


class TabularTransformer(nn.Module):
    """Self-attention regressor for tabular rows: linear embedding ->
    single-token multi-head attention -> MLP head."""

    def __init__(self, input_dim=7, output_dim=1, embedding_dim=64, num_heads=8, hidden_dim=128):
        super().__init__()
        # Project the raw feature vector into the attention embedding space
        self.embedding = nn.Linear(input_dim, embedding_dim)
        self.attention = nn.MultiheadAttention(embed_dim=embedding_dim, num_heads=num_heads)
        # MLP head mapping the attended embedding to the regression output
        self.fc = nn.Sequential(
            nn.Linear(embedding_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x):
        x = self.embedding(x)                  # (batch, embedding_dim)
        x = x.unsqueeze(0)                     # add sequence dimension: (1, batch, embedding_dim)
        attn_out, _ = self.attention(x, x, x)  # self-attention over the single "token"
        x = attn_out.squeeze(0)                # remove sequence dimension: (batch, embedding_dim)
        return self.fc(x)
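
# Illustrative shape check (an added sketch, not part of the original file):
# with the default input_dim=7, a float tensor of shape (batch, 7) maps to
# predictions of shape (batch, 1), e.g.
#   TabularTransformer()(torch.randn(4, 7)).shape  ->  torch.Size([4, 1])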

def model_predict(model, X_input, scaler_X, scaler_y):
    """Scale the raw inputs, run the model, and return predictions in the original target scale."""
    # Scale the features and convert to a tensor
    X_scaled = scaler_X.transform(X_input)
    X_tensor = torch.FloatTensor(X_scaled)

    # Make the prediction without tracking gradients
    model.eval()
    with torch.no_grad():
        scaled_pred = model(X_tensor).numpy()

    # Inverse-transform to get the prediction back in the original scale
    prediction = scaler_y.inverse_transform(scaled_pred)
    return prediction.flatten()
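

# Usage sketch (added for illustration; StandardScaler and the random data
# below are assumptions, not part of the original Space). It fits scalers on
# dummy data and runs an untrained model end to end through model_predict.
if __name__ == "__main__":
    from sklearn.preprocessing import StandardScaler

    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(100, 7))            # 100 rows, 7 raw features
    y_demo = rng.normal(size=(100, 1))            # matching regression target

    scaler_X = StandardScaler().fit(X_demo)       # feature scaler
    scaler_y = StandardScaler().fit(y_demo)       # target scaler

    demo_model = TabularTransformer(input_dim=7)  # randomly initialised weights
    preds = model_predict(demo_model, X_demo[:5], scaler_X, scaler_y)
    print(preds)                                  # 5 predictions in the original target scale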