v2 missed

NdR_male_superheros.py  CHANGED  (+245 -184)
@@ -1,3 +1,4 @@
+import streamlit as st
 import numpy as np
 import torch
 import torch.nn as nn
@@ -10,187 +11,247 @@ from sklearn.preprocessing import StandardScaler
 np.random.seed(42)
 torch.manual_seed(42)
 
-# Define Linear Model
-class LinearModel(nn.Module):
-# Initialize Linear Model
-input_dim = total_features
-output_dim = num_classes
-linear_model = LinearModel(input_dim, output_dim)
-
-# Loss and optimizer for Linear Model
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(linear_model.parameters(), lr=0.01, weight_decay=1e-4)
-
-# Training the Linear Model
-num_epochs = 100
-for epoch in range(num_epochs):
-# Evaluate Linear Model
-linear_model.eval()
-with torch.no_grad():
+def run_male_superhero_train():
+    # Number of samples per superhero
+    N_per_class = 200
+
+    # List of superheroes
+    superheroes = ['Iron Man', 'Hulk', 'Flash', 'Batman', 'Thor']
+
+    # Total number of classes
+    num_classes = len(superheroes)
+
+    # Total number of samples
+    N = N_per_class * num_classes
+
+    # Number of original features
+    D = 5  # Strength, Speed, Intelligence, Durability, Energy Projection
+
+    # Update the total number of features after adding the interaction term
+    total_features = D + 1  # Original features plus the interaction term
+
+    # Initialize feature matrix X and label vector y
+    X = np.zeros((N, total_features))
+    y = np.zeros(N, dtype=int)
+
+    # Define the mean and standard deviation for each feature per superhero
+    # Features: [Strength, Speed, Intelligence, Durability, Energy Projection]
+    superhero_stats = {
+        'Iron Man': {
+            'mean': [7, 7, 9, 8, 8],
+            'std': [0.5, 0.5, 0.2, 0.5, 0.5]
+        },
+        'Hulk': {
+            'mean': [10, 5, 3, 10, 2],
+            'std': [0.5, 0.5, 0.2, 0.5, 0.5]
+        },
+        'Flash': {
+            'mean': [4, 10, 6, 5, 3],
+            'std': [0.5, 0.5, 0.2, 0.5, 0.5]
+        },
+        'Batman': {
+            'mean': [5, 6, 9, 6, 2],
+            'std': [0.5, 0.5, 0.2, 0.5, 0.5]
+        },
+        'Thor': {
+            'mean': [10, 8, 7, 10, 9],
+            'std': [0.5, 0.5, 0.2, 0.5, 0.5]
+        },
+    }
+
+    # Generate synthetic data for each superhero with non-linear relationships
+    for idx, hero in enumerate(superheroes):
+        start = idx * N_per_class
+        end = (idx + 1) * N_per_class
+        means = superhero_stats[hero]['mean']
+        stds = superhero_stats[hero]['std']
+        X_hero = np.random.normal(means, stds, (N_per_class, D))
+        # Ensure feature values are within reasonable ranges before computing interaction
+        X_hero = np.clip(X_hero, 1, 10)
+        # Introduce non-linear feature interactions
+        interaction_term = np.sin(X_hero[:, 0]) * np.log(X_hero[:, 2])
+        X_hero = np.hstack((X_hero, interaction_term.reshape(-1, 1)))
+        X[start:end] = X_hero
+        y[start:end] = idx
+
+    # Ensure all feature values are within reasonable ranges
+    X[:, :D] = np.clip(X[:, :D], 1, 10)
+
+    # Shuffle the dataset
+    X, y = shuffle(X, y, random_state=42)
+
+    # Normalize the features
+    scaler = StandardScaler()
+    X = scaler.fit_transform(X)
+
+    # Split data into training and test sets
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, y, test_size=0.2, random_state=42)
+
+    # Convert data to torch tensors
+    X_train_tensor = torch.from_numpy(X_train).float()
+    y_train_tensor = torch.from_numpy(y_train).long()
+    X_test_tensor = torch.from_numpy(X_test).float()
+    y_test_tensor = torch.from_numpy(y_test).long()
+
+    # Random prediction function
+    def random_prediction(X):
+        num_samples = X.shape[0]
+        random_preds = np.random.randint(num_classes, size=num_samples)
+        return random_preds
+
+    # Random prediction and evaluation
+    random_preds = random_prediction(X_test)
+    random_accuracy = (random_preds == y_test).sum() / y_test.size
+
+    # Define Linear Model
+    class LinearModel(nn.Module):
+        def __init__(self, input_dim, output_dim):
+            super(LinearModel, self).__init__()
+            self.linear = nn.Linear(input_dim, output_dim)
+
+        def forward(self, x):
+            return self.linear(x)
+
+    # Initialize Linear Model
+    input_dim = total_features
+    output_dim = num_classes
+    linear_model = LinearModel(input_dim, output_dim)
+
+    # Loss and optimizer for Linear Model
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(linear_model.parameters(), lr=0.01, weight_decay=1e-4)
+
+    # Training the Linear Model
+    num_epochs = 100
+    for epoch in range(num_epochs):
+        linear_model.train()
+        outputs = linear_model(X_train_tensor)
+        loss = criterion(outputs, y_train_tensor)
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+        if (epoch + 1) % 25 == 0:
+            st.write('Modello Lineare - Epoch [{}/{}], Loss: {:.4f}'.format(
+                epoch + 1, num_epochs, loss.item()))
+
+    # Evaluate Linear Model
+    linear_model.eval()
+    with torch.no_grad():
+        outputs = linear_model(X_test_tensor)
+        _, predicted = torch.max(outputs.data, 1)
+        linear_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)
+
+    # Define Neural Network Model with regularization
+    class NeuralNet(nn.Module):
+        def __init__(self, input_dim, hidden_dims, output_dim):
+            super(NeuralNet, self).__init__()
+            layers = []
+            in_dim = input_dim
+            for h_dim in hidden_dims:
+                layers.append(nn.Linear(in_dim, h_dim))
+                layers.append(nn.ReLU())
+                layers.append(nn.BatchNorm1d(h_dim))
+                layers.append(nn.Dropout(0.3))
+                in_dim = h_dim
+            layers.append(nn.Linear(in_dim, output_dim))
+            self.model = nn.Sequential(*layers)
+
+        def forward(self, x):
+            return self.model(x)
+
+    # Initialize Neural Network Model
+    hidden_dims = [128, 64, 32]
+    neural_model = NeuralNet(input_dim, hidden_dims, output_dim)
+
+    # Loss and optimizer for Neural Network Model
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.Adam(neural_model.parameters(), lr=0.001, weight_decay=1e-4)
+
+    # Training the Neural Network Model
+    num_epochs = 2#00
+    for epoch in range(num_epochs):
+        neural_model.train()
+        outputs = neural_model(X_train_tensor)
+        loss = criterion(outputs, y_train_tensor)
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+        if (epoch + 1) % 20 == 0:
+            st.write('Rete Neurale - Epoch [{}/{}], Loss: {:.4f}'.format(
+                epoch + 1, num_epochs, loss.item()))
+
+    # Evaluate Neural Network Model
+    neural_model.eval()
+    with torch.no_grad():
+        outputs = neural_model(X_test_tensor)
+        _, predicted = torch.max(outputs.data, 1)
+        neural_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)
+
+    # Summary of Accuracies
+    st.write("\nRiepilogo delle Accuratezze:....")
+    st.error('Accuratezza Previsione Casuale: {:.2f}%'.format(100 * random_accuracy))
+    st.warning('Accuratezza Modello Lineare: {:.2f}%'.format(100 * linear_accuracy))
+    st.success('Accuratezza Rete Neurale: {:.2f}%'.format(100 * neural_accuracy))
+
+    return linear_model, neural_model, scaler, superheroes, num_classes
+
+def get_user_input_and_predict_male_superhero(linear_model, neural_model, scaler, superheroes, num_classes):
+    st.write("Adjust the sliders for the following superhero attributes on a scale from 1 to 10:")
+
+    # Feature names corresponding to superhero attributes
+    feature_names = ['Forza', 'Velocità', 'Intelligenza', 'Resistenza', 'Proiezione di Energia']
+
+    # Initialize or retrieve user input from session state to preserve values across reruns
+    if 'user_features' not in st.session_state:
+        st.session_state.user_features = [5] * len(feature_names)  # Default slider values set to 5
+
+    # Create a form to group sliders and button
+    with st.form(key='superhero_form'):
+        for i, feature in enumerate(feature_names):
+            st.session_state.user_features[i] = st.slider(
+                feature, 1, 10, st.session_state.user_features[i], key=f'slider_{i}'
+            )
+
+        # Form submission button
+        submit_button = st.form_submit_button(label='Calcola Previsioni')
+
+    # Proceed with prediction if the form is submitted
+    if submit_button:
+        # Copy user input values (superhero attributes)
+        user_features = st.session_state.user_features.copy()
+
+        # Calculate the interaction term (interaction between Strength and Intelligence)
+        interaction_term = np.sin(user_features[0]) * np.log(user_features[2])
+
+        # Append the interaction term to the original features
+        user_features.append(interaction_term)
+
+        # Convert to numpy array and reshape to match the expected input shape
+        user_features = np.array(user_features).reshape(1, -1)
+
+        # Normalize user inputs using the scaler that was fit during training
+        user_features_scaled = scaler.transform(user_features)
+
+        # Convert the scaled input into a torch tensor
+        user_tensor = torch.from_numpy(user_features_scaled).float()
+
+        # Make a random prediction for comparison
+        random_pred = np.random.randint(num_classes)
+        st.error(f"Previsione Casuale: {superheroes[random_pred]}")
+
+        # **Linear Model Prediction**
+        linear_model.eval()  # Set model to evaluation mode
+        with torch.no_grad():
+            outputs = linear_model(user_tensor)
+            _, predicted = torch.max(outputs.data, 1)
+            linear_pred = predicted.item()
+        st.warning(f"Previsione Modello Lineare: {superheroes[linear_pred]}")
+
+        # **Neural Network Prediction**
+        neural_model.eval()  # Set model to evaluation mode
+        with torch.no_grad():
+            outputs = neural_model(user_tensor)
+            _, predicted = torch.max(outputs.data, 1)
+            neural_pred = predicted.item()
+        st.success(f"Previsione Rete Neurale: {superheroes[neural_pred]}")
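For context, a minimal sketch of how the two functions added in this commit could be wired together from a Streamlit entry point. The file name app.py, the page title, and the note about caching are assumptions for illustration, not part of this diff:

# app.py - hypothetical entry point (assumed, not part of this commit)
import streamlit as st

from NdR_male_superheros import (
    run_male_superhero_train,
    get_user_input_and_predict_male_superhero,
)

st.title("Male Superhero Classifier")  # assumed title

# run_male_superhero_train() retrains both models on every rerun of the script;
# wrapping it in a cached helper (e.g. st.cache_resource) would avoid that, but a
# plain call keeps this sketch close to the functions as committed.
linear_model, neural_model, scaler, superheroes, num_classes = run_male_superhero_train()

# Render the sliders and, on form submission, show the random, linear-model and
# neural-network predictions.
get_user_input_and_predict_male_superhero(
    linear_model, neural_model, scaler, superheroes, num_classes
)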