sayyedAhmed committed on
Commit b1f4374 · 1 Parent(s): 7d950d3
Files changed (2)
  1. config.json +16 -10
  2. inference.py +0 -60
config.json CHANGED
@@ -1,10 +1,16 @@
- {
-   "architectures": ["LSTMPredictor"],
-   "input_dim": 10,
-   "hidden_dim": 64,
-   "output_dim": 1,
-   "forecast_horizon": 3,
-   "n_layers": 2,
-   "dropout": 0.2
- }
-
+
+ {
+   "input_dim": 5,
+   "hidden_dim": 32,
+   "output_dim": 1,
+   "forecast_horizon": 3,
+   "n_layers": 2,
+   "dropout": 0.2,
+   "seq_length": 10,
+   "batch_size": 16,
+   "device": "cpu",
+   "model_path": "lstm_crisis_severity_predictor_20241116_092126.pt",
+   "repo_id": "your_username/your_model_repo",
+   "data_path": "test_data.npy",
+   "output_path": "predictions.npy"
+ }
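For context, a minimal sketch of reading the updated config.json: the key names and values come from this commit, but the snippet itself (and how the repository actually consumes the file) is only illustrative, mirroring the LSTMPredictor definition removed from inference.py below.

import json
import torch

# Read the hyperparameters and runtime paths added in this commit (illustrative only).
with open("config.json") as f:
    cfg = json.load(f)

# The architecture fields map onto the layers of the (removed) LSTMPredictor;
# the remaining keys (model_path, data_path, seq_length, output_path, device)
# appear to drive the runtime side of inference.
lstm = torch.nn.LSTM(cfg["input_dim"], cfg["hidden_dim"], cfg["n_layers"],
                     dropout=cfg["dropout"], batch_first=True)
head = torch.nn.Linear(cfg["hidden_dim"], cfg["output_dim"])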
inference.py CHANGED
@@ -22,64 +22,4 @@
 
  # # Print the response (the predictions)
  # print(response.json())
- import torch
- import numpy as np
-
- # Define the model architecture (this should match the one used during training)
- class LSTMPredictor(torch.nn.Module):
-     def __init__(self, input_dim, hidden_dim, output_dim, forecast_horizon, n_layers, dropout):
-         super(LSTMPredictor, self).__init__()
-         self.lstm = torch.nn.LSTM(input_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)
-         self.fc = torch.nn.Linear(hidden_dim, output_dim)
-         self.forecast_horizon = forecast_horizon
-
-     def forward(self, x):
-         # Forward pass through LSTM
-         lstm_out, _ = self.lstm(x)
-         # Only get the output from the last time step
-         out = self.fc(lstm_out[:, -1, :])
-         return out
-
- # Load the model
- def load_model(model_path):
-     model_state = torch.load(model_path)
-     model = LSTMPredictor(
-         input_dim=model_state['model_architecture']['input_dim'],
-         hidden_dim=model_state['model_architecture']['hidden_dim'],
-         output_dim=model_state['model_architecture']['output_dim'],
-         forecast_horizon=model_state['model_architecture']['forecast_horizon'],
-         n_layers=model_state['model_architecture']['n_layers'],
-         dropout=model_state['model_architecture']['dropout']
-     )
-     model.load_state_dict(model_state['model_state_dict'])
-     model.eval()  # Set model to evaluation mode
-     return model
-
- # Inference function
- def predict(model, features):
-     # Convert input features to tensor
-     input_tensor = torch.FloatTensor(features)
-
-     # Get model prediction
-     with torch.no_grad():
-         predictions = model(input_tensor).numpy()  # No gradients needed for inference
-
-     return predictions.tolist()
-
- # Main function to load the model and make predictions
- if __name__ == "__main__":
-     # Load the trained model
-     model = load_model('lstm_crisis_severity_predictor_20241116_092126.pt')  # Replace with actual model path
-
-     # Example input data (features) - Replace with actual features
-     test_features = np.array([[1.23, 4.56, 7.89, 10.11]])  # Example test features
-
-     # Reshape for LSTM: (batch_size, seq_len, input_dim)
-     test_features = test_features.reshape((test_features.shape[0], 1, test_features.shape[1]))
-
-     # Get predictions
-     predictions = predict(model, test_features)
-
-     # Output predictions
-     print("Predictions:", predictions)
 
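Since the commit strips the model and inference helpers out of inference.py while config.json gains runtime fields (model_path, data_path, seq_length, output_path, device), a config-driven version of the deleted logic might look roughly like the sketch below. LSTMPredictor and the checkpoint keys mirror the removed code; wiring data_path, seq_length and output_path into the loop is an assumption, not something this diff shows.

import json
import numpy as np
import torch

# Mirrors the LSTMPredictor that was removed from inference.py in this commit.
class LSTMPredictor(torch.nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, forecast_horizon, n_layers, dropout):
        super().__init__()
        self.lstm = torch.nn.LSTM(input_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)
        self.fc = torch.nn.Linear(hidden_dim, output_dim)
        self.forecast_horizon = forecast_horizon

    def forward(self, x):
        lstm_out, _ = self.lstm(x)          # run the whole sequence through the LSTM
        return self.fc(lstm_out[:, -1, :])  # predict from the last time step only

def load_model(model_path, device="cpu"):
    # Same checkpoint layout as the removed load_model: architecture kwargs + state dict.
    state = torch.load(model_path, map_location=device)
    model = LSTMPredictor(**state["model_architecture"])
    model.load_state_dict(state["model_state_dict"])
    model.eval()
    return model

if __name__ == "__main__":
    with open("config.json") as f:
        cfg = json.load(f)

    model = load_model(cfg["model_path"], cfg["device"])
    # Assumed: data_path holds a float array reshapeable to (batch, seq_length, input_dim).
    features = np.load(cfg["data_path"]).reshape(-1, cfg["seq_length"], cfg["input_dim"])
    with torch.no_grad():
        predictions = model(torch.FloatTensor(features)).numpy()
    np.save(cfg["output_path"], predictions)  # e.g. predictions.npy
    print("Predictions:", predictions.tolist())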