import streamlit as st
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# Seed both RNGs so the synthetic data and the training runs are reproducible.
np.random.seed(42)
torch.manual_seed(42)


def run_disease_train():
    """Generate synthetic symptom data and train two classifiers on it.

    Builds a balanced synthetic dataset of five conditions (500 samples each,
    10 symptom features plus 2 non-linear interaction terms), then trains and
    evaluates a random baseline, a linear model, and a small feed-forward
    network, reporting progress and accuracies through Streamlit.

    Returns:
        tuple: (linear_model, neural_model, scaler, condizioni, num_classes)
            - linear_model (nn.Module): trained linear classifier.
            - neural_model (nn.Module): trained feed-forward classifier.
            - scaler (StandardScaler): fitted feature scaler.
            - condizioni (list[str]): class names, index-aligned with labels.
            - num_classes (int): number of conditions.
    """
    # Number of samples per condition.
    N_per_class = 500

    # List of conditions (classes).
    condizioni = ['Raffreddore Comune', 'Allergie Stagionali', 'Emicrania',
                  'Gastroenterite', 'Cefalea Tensiva']

    num_classes = len(condizioni)
    N = N_per_class * num_classes  # total number of samples

    # Number of original features (symptoms).
    D = 10
    # Total feature count after appending the two interaction terms.
    total_features = D + 2

    # Feature matrix and label vector, filled per-class below.
    X = np.zeros((N, total_features))
    y = np.zeros(N, dtype=int)

    # Per-condition mean and standard deviation for each symptom feature.
    # Features: [Febbre, Tosse, Starnuti, Naso che Cola, Nausea, Vomito,
    #            Diarrea, Mal di Testa, Affaticamento, Livello di Stress]
    statistiche_condizioni = {
        'Raffreddore Comune': {
            'mean': [1, 6, 7, 8, 1, 1, 1, 5, 5, 5],
            'std': [1.5, 2, 2, 2, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Allergie Stagionali': {
            'mean': [0, 3, 8, 9, 1, 1, 1, 4, 4, 6],
            'std': [1.5, 2, 2, 2, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Emicrania': {
            'mean': [0, 1, 1, 1, 2, 2, 2, 8, 7, 8],
            'std': [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Gastroenterite': {
            'mean': [2, 2, 1, 1, 7, 6, 8, 5, 6, 5],
            'std': [1.5, 2, 1.5, 1.5, 2, 2, 2, 2, 2, 2]
        },
        'Cefalea Tensiva': {
            'mean': [0, 1, 1, 1, 1, 1, 1, 6, 5, 8],
            'std': [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 2, 2, 2]
        },
    }

    # Generate synthetic data for each condition.
    for idx, condition in enumerate(condizioni):
        start = idx * N_per_class
        end = (idx + 1) * N_per_class
        means = statistiche_condizioni[condition]['mean']
        stds = statistiche_condizioni[condition]['std']
        X_condition = np.random.normal(means, stds, (N_per_class, D))
        # Clamp symptom intensities to the 0-10 scale used by the UI sliders.
        X_condition = np.clip(X_condition, 0, 10)
        # Non-linear feature interactions:
        # headache (idx 7) x stress level (idx 9), and fever (0) x nausea (4).
        interaction_term = np.sin(X_condition[:, 7]) * np.log1p(X_condition[:, 9])
        interaction_term2 = X_condition[:, 0] * X_condition[:, 4]
        X_condition = np.hstack((X_condition,
                                 interaction_term.reshape(-1, 1),
                                 interaction_term2.reshape(-1, 1)))
        X[start:end] = X_condition
        y[start:end] = idx

    # Shuffle, normalize, and split the dataset.
    X, y = shuffle(X, y, random_state=42)
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(
        X_scaled, y, test_size=0.2, random_state=42
    )

    # Convert data to torch tensors.
    X_train_tensor = torch.from_numpy(X_train).float()
    y_train_tensor = torch.from_numpy(y_train).long()
    X_test_tensor = torch.from_numpy(X_test).float()
    y_test_tensor = torch.from_numpy(y_test).long()

    def random_prediction(num_samples):
        # Uniform random class guesses — the chance-level baseline.
        return np.random.randint(num_classes, size=num_samples)

    # Random baseline accuracy (expected ~1/num_classes).
    random_preds = random_prediction(len(y_test))
    random_accuracy = (random_preds == y_test).sum() / y_test.size

    class LinearModel(nn.Module):
        """Single affine layer: logits = W x + b."""

        def __init__(self, input_dim, output_dim):
            super(LinearModel, self).__init__()
            self.linear = nn.Linear(input_dim, output_dim)

        def forward(self, x):
            return self.linear(x)

    input_dim = total_features
    output_dim = num_classes
    linear_model = LinearModel(input_dim, output_dim)

    # Loss and optimizer for the linear model (L2 via weight_decay).
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(linear_model.parameters(), lr=0.01, weight_decay=1e-4)

    # Full-batch training of the linear model.
    num_epochs = 50
    for epoch in range(num_epochs):
        linear_model.train()
        outputs = linear_model(X_train_tensor)
        loss = criterion(outputs, y_train_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 25 == 0:
            st.write('Modello Lineare - Epoch [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, loss.item()))

    # Evaluate the linear model on the held-out set.
    linear_model.eval()
    with torch.no_grad():
        outputs = linear_model(X_test_tensor)
        _, predicted = torch.max(outputs, 1)
        linear_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)

    class NeuralNet(nn.Module):
        """Feed-forward classifier: [Linear -> ReLU -> BatchNorm -> Dropout]* -> Linear."""

        def __init__(self, input_dim, hidden_dims, output_dim):
            super(NeuralNet, self).__init__()
            layers = []
            in_dim = input_dim
            for h_dim in hidden_dims:
                layers.append(nn.Linear(in_dim, h_dim))
                layers.append(nn.ReLU())
                layers.append(nn.BatchNorm1d(h_dim))
                layers.append(nn.Dropout(0.5))
                in_dim = h_dim
            layers.append(nn.Linear(in_dim, output_dim))
            self.model = nn.Sequential(*layers)

        def forward(self, x):
            return self.model(x)

    hidden_dims = [256, 128, 64]
    neural_model = NeuralNet(input_dim, hidden_dims, output_dim)

    # Loss and optimizer for the neural network (Adam + weight decay).
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(neural_model.parameters(), lr=0.001, weight_decay=1e-4)

    # Full-batch training of the neural network.
    num_epochs = 300
    for epoch in range(num_epochs):
        neural_model.train()
        outputs = neural_model(X_train_tensor)
        loss = criterion(outputs, y_train_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 30 == 0:
            st.write('Rete Neurale - Epoch [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, loss.item()))

    # Evaluate the neural network on the held-out set.
    neural_model.eval()
    with torch.no_grad():
        outputs = neural_model(X_test_tensor)
        _, predicted = torch.max(outputs, 1)
        neural_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)

    # Summary of accuracies.
    st.write("\nRiepilogo delle Accuratezze:....")
    st.error(f'Accuratezza Previsione Casuale: {random_accuracy * 100:.2f}%')
    st.warning(f'Accuratezza Modello Lineare: {linear_accuracy * 100:.2f}%')
    st.success(f'Accuratezza Rete Neurale: {neural_accuracy * 100:.2f}%')

    return linear_model, neural_model, scaler, condizioni, num_classes


def get_user_input_and_predict_disease(modello_lineare, modello_neurale, scaler, condizioni, num_classes):
    """Collect symptom intensities via Streamlit sliders and show predictions.

    Renders a form of 0-10 sliders (one per symptom), and on submission builds
    the same 12-dimensional feature vector used in training (10 symptoms plus
    the two interaction terms), scales it with the fitted `scaler`, and shows
    a random guess, the linear model's prediction, and the network's prediction.

    Args:
        modello_lineare (nn.Module): trained linear classifier.
        modello_neurale (nn.Module): trained feed-forward classifier.
        scaler (StandardScaler): scaler fitted on the training features.
        condizioni (list[str]): class names, index-aligned with model outputs.
        num_classes (int): number of conditions.
    """
    st.write("Regola i cursori per i seguenti sintomi su una scala da 0 (nessuno) a 10 (grave):")

    nomi_caratteristiche = ['Febbre', 'Tosse', 'Starnuti', 'Naso che Cola', 'Nausea',
                            'Vomito', 'Diarrea', 'Mal di Testa', 'Affaticamento',
                            'Livello di Stress']

    # Initialize slider values in session state on first render (default 5).
    if 'user_features' not in st.session_state:
        st.session_state.user_features = [5] * len(nomi_caratteristiche)

    # Group the sliders and the submit button in one form so the app only
    # reruns predictions when the user explicitly submits.
    with st.form(key='symptom_form'):
        for i, caratteristica in enumerate(nomi_caratteristiche):
            st.session_state.user_features[i] = st.slider(
                caratteristica, 0, 10, st.session_state.user_features[i],
                key=f'slider_{i}'
            )
        submit_button = st.form_submit_button(label='Calcola Previsioni')
        if submit_button:
            # Persist submission across reruns.
            st.session_state.form_submitted = True

    if st.session_state.get('form_submitted', False):
        user_features = st.session_state.user_features.copy()

        # Same interaction terms as in training:
        # headache x stress level, and fever x nausea.
        termine_interazione = np.sin(user_features[7]) * np.log1p(user_features[9])
        termine_interazione2 = user_features[0] * user_features[4]
        user_features.extend([termine_interazione, termine_interazione2])

        # Scale with the training-time statistics and convert to a tensor.
        user_features = scaler.transform([user_features])
        user_tensor = torch.from_numpy(user_features).float()

        # Random baseline prediction.
        previsione_casuale = np.random.randint(num_classes)
        st.error(f"\nPrevisione Casuale: {condizioni[previsione_casuale]}")

        # Linear model prediction.
        modello_lineare.eval()
        with torch.no_grad():
            output = modello_lineare(user_tensor)
            _, predetto = torch.max(output, 1)
            previsione_lineare = predetto.item()
        st.warning(f"Previsione Modello Lineare: {condizioni[previsione_lineare]}")

        # Neural network prediction.
        modello_neurale.eval()
        with torch.no_grad():
            output = modello_neurale(user_tensor)
            _, predetto = torch.max(output, 1)
            previsione_neurale = predetto.item()
        st.success(f"Previsione Rete Neurale: {condizioni[previsione_neurale]}")