# HTNotteDeiRicercatori24_Classifiers / NdR_female_superheros.py
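"""Streamlit demo for the Notte dei Ricercatori (NdR): builds a synthetic dataset of
five female superheroes, trains a linear classifier and a small neural network on it,
compares both against random guessing, and lets the user predict a superhero from
slider input."""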
import streamlit as st
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Set random seed for reproducibility
np.random.seed(42)
torch.manual_seed(42)
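# Seeding NumPy and PyTorch keeps the synthetic data and the model weight
# initialization reproducible across reruns.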
def run_female_superhero_train():
# Number of samples per superhero
N_per_class = 200
# List of female superheroes
superheroes = ['Wonder Woman', 'Captain Marvel', 'Vedova Nera', 'Tempesta', 'Supergirl']
# Total number of classes
num_classes = len(superheroes)
# Total number of samples
N = N_per_class * num_classes
# Number of original features
D = 5 # Strength, Speed, Intelligence, Durability, Energy Projection
# Update the total number of features after adding the interaction term
total_features = D + 1 # Original features plus the interaction term
# Initialize feature matrix X and label vector y
X = np.zeros((N, total_features))
y = np.zeros(N, dtype=int)
# Define the mean and standard deviation for each feature per superhero
# Features: [Strength, Speed, Intelligence, Durability, Energy Projection]
superhero_stats = {
'Wonder Woman': {
'mean': [9, 9, 8, 9, 8],
'std': [0.5, 0.5, 0.5, 0.5, 0.5]
},
'Captain Marvel': {
'mean': [10, 9, 7, 10, 10],
'std': [0.5, 0.5, 0.5, 0.5, 0.5]
},
'Vedova Nera': {
'mean': [5, 7, 8, 6, 2],
'std': [0.5, 0.5, 0.5, 0.5, 0.5]
},
'Tempesta': {
'mean': [6, 7, 8, 6, 9],
'std': [0.5, 0.5, 0.5, 0.5, 0.5]
},
'Supergirl': {
'mean': [10, 10, 8, 10, 9],
'std': [0.5, 0.5, 0.5, 0.5, 0.5]
},
}
# Generate synthetic data for each superhero with non-linear relationships
for idx, hero in enumerate(superheroes):
start = idx * N_per_class
end = (idx + 1) * N_per_class
means = superhero_stats[hero]['mean']
stds = superhero_stats[hero]['std']
X_hero = np.random.normal(means, stds, (N_per_class, D))
# Ensure feature values are within reasonable ranges before computing interaction
X_hero = np.clip(X_hero, 1, 10)
# Introduce non-linear feature interactions
interaction_term = np.sin(X_hero[:, 1]) * np.log(X_hero[:, 4]) # Interaction between Speed and Energy Projection
X_hero = np.hstack((X_hero, interaction_term.reshape(-1, 1)))
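        # The clip above keeps every stat in [1, 10], so np.log(X_hero[:, 4]) is well-defined;
        # hstack appends the interaction column, giving each sample total_features = D + 1 values.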
X[start:end] = X_hero
y[start:end] = idx
# Ensure all feature values are within reasonable ranges
X[:, :D] = np.clip(X[:, :D], 1, 10)
# Shuffle the dataset
X, y = shuffle(X, y, random_state=42)
# Normalize the features
scaler = StandardScaler()
X = scaler.fit_transform(X)
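    # The fitted scaler is returned at the end and reused in
    # get_user_input_and_predict_female_superhero to transform user input the same way.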
# Split data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
# Convert data to torch tensors
X_train_tensor = torch.from_numpy(X_train).float()
y_train_tensor = torch.from_numpy(y_train).long()
X_test_tensor = torch.from_numpy(X_test).float()
y_test_tensor = torch.from_numpy(y_test).long()
# Random prediction function
def random_prediction(X):
num_samples = X.shape[0]
random_preds = np.random.randint(num_classes, size=num_samples)
return random_preds
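    # With balanced classes, random guessing has an expected accuracy of 1 / num_classes (20% here).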
# Random prediction and evaluation
random_preds = random_prediction(X_test)
random_accuracy = (random_preds == y_test).sum() / y_test.size
# Define Linear Model
class LinearModel(nn.Module):
def __init__(self, input_dim, output_dim):
super(LinearModel, self).__init__()
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, x):
return self.linear(x)
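    # The model returns raw logits: nn.CrossEntropyLoss applies log-softmax internally,
    # so no activation is needed in the forward pass.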
# Initialize Linear Model
input_dim = total_features
output_dim = num_classes
linear_model = LinearModel(input_dim, output_dim)
# Loss and optimizer for Linear Model
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(linear_model.parameters(), lr=0.01, weight_decay=1e-4)
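    # weight_decay=1e-4 adds L2 regularization to the SGD updates.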
# Training the Linear Model
num_epochs = 100
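    # Each epoch below performs a single full-batch gradient step over the whole
    # training set (no DataLoader / mini-batches).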
for epoch in range(num_epochs):
linear_model.train()
outputs = linear_model(X_train_tensor)
loss = criterion(outputs, y_train_tensor)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch + 1) % 25 == 0:
st.write('Modello Lineare - Epoch [{}/{}], Loss: {:.4f}'.format(
epoch + 1, num_epochs, loss.item()))
# Evaluate Linear Model
linear_model.eval()
with torch.no_grad():
outputs = linear_model(X_test_tensor)
        _, predicted = torch.max(outputs, 1)
linear_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)
# Define Neural Network Model with regularization
class NeuralNet(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim):
super(NeuralNet, self).__init__()
layers = []
in_dim = input_dim
for h_dim in hidden_dims:
layers.append(nn.Linear(in_dim, h_dim))
layers.append(nn.ReLU())
layers.append(nn.BatchNorm1d(h_dim))
layers.append(nn.Dropout(0.3))
in_dim = h_dim
layers.append(nn.Linear(in_dim, output_dim))
self.model = nn.Sequential(*layers)
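            # Each hidden block is Linear -> ReLU -> BatchNorm1d -> Dropout(0.3);
            # the final Linear layer outputs raw logits for CrossEntropyLoss.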
def forward(self, x):
return self.model(x)
# Initialize Neural Network Model
hidden_dims = [128, 64, 32]
neural_model = NeuralNet(input_dim, hidden_dims, output_dim)
# Loss and optimizer for Neural Network Model
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(neural_model.parameters(), lr=0.001, weight_decay=1e-4)
# Training the Neural Network Model
num_epochs = 200
for epoch in range(num_epochs):
neural_model.train()
outputs = neural_model(X_train_tensor)
loss = criterion(outputs, y_train_tensor)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch + 1) % 20 == 0:
st.write('Rete Neurale - Epoch [{}/{}], Loss: {:.4f}'.format(
epoch + 1, num_epochs, loss.item()))
# Evaluate Neural Network Model
neural_model.eval()
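    # eval() disables Dropout and switches BatchNorm1d to its running statistics.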
with torch.no_grad():
outputs = neural_model(X_test_tensor)
        _, predicted = torch.max(outputs, 1)
neural_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)
# Summary of Accuracies
st.write("\nRiepilogo delle Accuratezze:....")
st.error('Accuratezza Previsione Casuale: {:.2f}%'.format(100 * random_accuracy))
st.warning('Accuratezza Modello Lineare: {:.2f}%'.format(100 * linear_accuracy))
st.success('Accuratezza Rete Neurale: {:.2f}%'.format(100 * neural_accuracy))
return linear_model, neural_model, scaler, superheroes, num_classes
def get_user_input_and_predict_female_superhero(linear_model, neural_model, scaler, superheroes, num_classes):
st.write("Adjust the sliders for the following superhero attributes on a scale from 1 to 10:")
# Feature names corresponding to superhero attributes
feature_names = ['Forza', 'Velocità', 'Intelligenza', 'Resistenza', 'Proiezione di Energia']
# Initialize or retrieve user input from session state to preserve the values across reruns
if 'user_features' not in st.session_state:
st.session_state.user_features = [5] * len(feature_names) # Default slider values set to 5
# Create a form to group sliders and button
with st.form(key='superhero_form'):
for i, feature in enumerate(feature_names):
st.session_state.user_features[i] = st.slider(
feature, 1, 10, st.session_state.user_features[i], key=f'slider_{i}'
)
# Form submission button
submit_button = st.form_submit_button(label='Calcola Previsioni')
# Proceed with prediction if the form is submitted
if submit_button:
# Copy user input values (superhero attributes)
user_features = st.session_state.user_features.copy()
# Calculate interaction term (interaction between Speed and Energy Projection)
interaction_term = np.sin(user_features[1]) * np.log(user_features[4])
# Append the interaction term to the original features
user_features.append(interaction_term)
# Convert to numpy array and reshape to match the expected input shape
user_features = np.array(user_features).reshape(1, -1)
# Normalize user inputs using the scaler that was fit during training
user_features_scaled = scaler.transform(user_features)
# Convert the scaled input into a torch tensor
user_tensor = torch.from_numpy(user_features_scaled).float()
# Make a random prediction for comparison
random_pred = np.random.randint(num_classes)
st.error(f"Previsione Casuale: {superheroes[random_pred]}")
# **Linear Model Prediction**
linear_model.eval() # Set model to evaluation mode
with torch.no_grad():
outputs = linear_model(user_tensor)
            _, predicted = torch.max(outputs, 1)
linear_pred = predicted.item()
st.warning(f"Previsione Modello Lineare: {superheroes[linear_pred]}")
# **Neural Network Prediction**
neural_model.eval() # Set model to evaluation mode
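        # With eval() set, BatchNorm1d uses running statistics, so a single-sample batch is valid here.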
with torch.no_grad():
outputs = neural_model(user_tensor)
            _, predicted = torch.max(outputs, 1)
neural_pred = predicted.item()
st.success(f"Previsione Rete Neurale: {superheroes[neural_pred]}")