# NdR_male_superheros.py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Set random seed for reproducibility
np.random.seed(42)
torch.manual_seed(42)
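# Note (added): seeding NumPy and PyTorch covers all randomness used in this
# script; full determinism on GPU would additionally need
# torch.use_deterministic_algorithms(True), which is not required here.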
# Number of samples per superhero
N_per_class = 200
# List of superheroes
superheroes = ['Iron Man', 'Hulk', 'Flash', 'Batman', 'Thor']
# Total number of classes
num_classes = len(superheroes)
# Total number of samples
N = N_per_class * num_classes
# Number of original features
D = 5 # Strength, Speed, Intelligence, Durability, Energy Projection
# Update the total number of features after adding the interaction term
total_features = D + 1 # Original features plus the interaction term
# Initialize feature matrix X and label vector y
X = np.zeros((N, total_features))
y = np.zeros(N, dtype=int)
# Define the mean and standard deviation for each feature per superhero
# Features: [Strength, Speed, Intelligence, Durability, Energy Projection]
superhero_stats = {
    'Iron Man': {
        'mean': [7, 7, 9, 8, 8],
        'std': [0.5, 0.5, 0.2, 0.5, 0.5]
    },
    'Hulk': {
        'mean': [10, 5, 3, 10, 2],
        'std': [0.5, 0.5, 0.2, 0.5, 0.5]
    },
    'Flash': {
        'mean': [4, 10, 6, 5, 3],
        'std': [0.5, 0.5, 0.2, 0.5, 0.5]
    },
    'Batman': {
        'mean': [5, 6, 9, 6, 2],
        'std': [0.5, 0.5, 0.2, 0.5, 0.5]
    },
    'Thor': {
        'mean': [10, 8, 7, 10, 9],
        'std': [0.5, 0.5, 0.2, 0.5, 0.5]
    },
}
# Generate synthetic data for each superhero with non-linear relationships
for idx, hero in enumerate(superheroes):
    start = idx * N_per_class
    end = (idx + 1) * N_per_class
    means = superhero_stats[hero]['mean']
    stds = superhero_stats[hero]['std']
    X_hero = np.random.normal(means, stds, (N_per_class, D))
    # Clip to [1, 10] before computing the interaction so np.log stays well-defined
    X_hero = np.clip(X_hero, 1, 10)
    # Non-linear feature interaction: sin(Strength) * log(Intelligence)
    interaction_term = np.sin(X_hero[:, 0]) * np.log(X_hero[:, 2])
    X_hero = np.hstack((X_hero, interaction_term.reshape(-1, 1)))
    X[start:end] = X_hero
    y[start:end] = idx
# Re-clip the original features (a no-op here, since each hero block was
# already clipped before the interaction term was computed)
X[:, :D] = np.clip(X[:, :D], 1, 10)
# Shuffle the dataset
X, y = shuffle(X, y, random_state=42)
# Normalize the features
scaler = StandardScaler()
X = scaler.fit_transform(X)
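# Note (added): fitting the scaler on the full dataset lets test-set statistics
# leak into the normalization. A stricter alternative (a sketch, not used
# below) would split first and scale with train-set statistics only:
#     X_train, X_test, y_train, y_test = train_test_split(
#         X, y, test_size=0.2, random_state=42)
#     scaler = StandardScaler()
#     X_train = scaler.fit_transform(X_train)
#     X_test = scaler.transform(X_test)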
# Split data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)
# Convert data to torch tensors
X_train_tensor = torch.from_numpy(X_train).float()
y_train_tensor = torch.from_numpy(y_train).long()
X_test_tensor = torch.from_numpy(X_test).float()
y_test_tensor = torch.from_numpy(y_test).long()
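# Note (added): X is float64 on the NumPy side, so .float() casts (and copies)
# to float32, the default dtype expected by PyTorch layers.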
# Random prediction function
def random_prediction(X):
    num_samples = X.shape[0]
    random_preds = np.random.randint(num_classes, size=num_samples)
    return random_preds
# Random prediction and evaluation
random_preds = random_prediction(X_test)
random_accuracy = (random_preds == y_test).sum() / y_test.size
print('Random Prediction Accuracy: {:.2f}%'.format(100 * random_accuracy))
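# Note (added): with 5 balanced classes, uniform random guessing should land
# near 1 / num_classes = 20% accuracy, which anchors the two models below.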
# Define Linear Model
class LinearModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(x)
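# Note (added): a single linear layer trained with CrossEntropyLoss amounts to
# multinomial (softmax) logistic regression; the class above is simply an
# nn.Module wrapper around nn.Linear.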
# Initialize Linear Model
input_dim = total_features
output_dim = num_classes
linear_model = LinearModel(input_dim, output_dim)
# Loss and optimizer for Linear Model
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(linear_model.parameters(), lr=0.01, weight_decay=1e-4)
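# Note (added): weight_decay adds an L2 penalty on the weights to the SGD
# update, a light form of regularization.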
# Training the Linear Model
num_epochs = 100
for epoch in range(num_epochs):
    linear_model.train()
    outputs = linear_model(X_train_tensor)
    loss = criterion(outputs, y_train_tensor)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 20 == 0:
        print('Linear Model - Epoch [{}/{}], Loss: {:.4f}'.format(
            epoch + 1, num_epochs, loss.item()))
# Evaluate Linear Model
linear_model.eval()
with torch.no_grad():
    outputs = linear_model(X_test_tensor)
    _, predicted = torch.max(outputs, 1)
    linear_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)
print('Linear Model Accuracy: {:.2f}%'.format(100 * linear_accuracy))
# Define Neural Network Model with regularization
class NeuralNet(nn.Module):
    def __init__(self, input_dim, hidden_dims, output_dim):
        super(NeuralNet, self).__init__()
        layers = []
        in_dim = input_dim
        for h_dim in hidden_dims:
            layers.append(nn.Linear(in_dim, h_dim))
            layers.append(nn.ReLU())
            layers.append(nn.BatchNorm1d(h_dim))
            layers.append(nn.Dropout(0.3))
            in_dim = h_dim
        layers.append(nn.Linear(in_dim, output_dim))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
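# Note (added): the hidden blocks above are ordered Linear -> ReLU ->
# BatchNorm -> Dropout; another common arrangement (not tested here) is
# Linear -> BatchNorm -> ReLU -> Dropout, which normalizes pre-activations.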
# Initialize Neural Network Model
hidden_dims = [128, 64, 32]
neural_model = NeuralNet(input_dim, hidden_dims, output_dim)
# Loss and optimizer for Neural Network Model
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(neural_model.parameters(), lr=0.001, weight_decay=1e-4)
# Training the Neural Network Model
num_epochs = 200
for epoch in range(num_epochs):
    neural_model.train()
    outputs = neural_model(X_train_tensor)
    loss = criterion(outputs, y_train_tensor)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 20 == 0:
        print('Neural Network - Epoch [{}/{}], Loss: {:.4f}'.format(
            epoch + 1, num_epochs, loss.item()))
# Evaluate Neural Network Model
neural_model.eval()
with torch.no_grad():
    outputs = neural_model(X_test_tensor)
    _, predicted = torch.max(outputs, 1)
    neural_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)
print('Neural Network Model Accuracy: {:.2f}%'.format(100 * neural_accuracy))
# Summary of Accuracies
print("\nSummary of Accuracies:")
print('Random Prediction Accuracy: {:.2f}%'.format(100 * random_accuracy))
print('Linear Model Accuracy: {:.2f}%'.format(100 * linear_accuracy))
print('Neural Network Model Accuracy: {:.2f}%'.format(100 * neural_accuracy))
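# Optional (added): a minimal per-class breakdown as a sketch, assuming the
# tensors above are still in scope; `predicted` here holds the neural
# network's test predictions.
from sklearn.metrics import classification_report
print(classification_report(
    y_test_tensor.numpy(), predicted.numpy(), target_names=superheroes))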