# NOTE(review): removed scraper artifacts that were not valid Python
# (a "File size" banner, a git-blame hash gutter, and a line-number gutter).
import streamlit as st
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Seed NumPy and PyTorch RNGs so dataset generation, weight init and
# dropout are reproducible across runs.
np.random.seed(42)
torch.manual_seed(42)
def run_disease_train():
    """Build a synthetic symptom dataset, train two classifiers, and report results.

    Generates 500 samples per condition from per-condition Gaussian symptom
    profiles, adds two engineered interaction features, then trains a linear
    model and an MLP and writes their test accuracies (plus a random-guess
    baseline) to the Streamlit page.

    Returns:
        tuple: (linear_model, neural_model, scaler, condizioni, num_classes)
            — the two fitted models, the fitted StandardScaler, the class
            name list, and the number of classes.
    """
    # ---- Synthetic dataset --------------------------------------------------
    N_per_class = 500
    condizioni = ['Raffreddore Comune', 'Allergie Stagionali', 'Emicrania',
                  'Gastroenterite', 'Cefalea Tensiva']
    num_classes = len(condizioni)
    N = N_per_class * num_classes
    D = 10                    # number of raw symptom features
    total_features = D + 2    # raw features plus two engineered interaction terms

    X = np.zeros((N, total_features))
    y = np.zeros(N, dtype=int)

    # Per-condition mean/std for each symptom on a 0-10 severity scale.
    # Feature order: [Febbre, Tosse, Starnuti, Naso che Cola, Nausea, Vomito,
    #                 Diarrea, Mal di Testa, Affaticamento, Livello di Stress]
    statistiche_condizioni = {
        'Raffreddore Comune': {
            'mean': [1, 6, 7, 8, 1, 1, 1, 5, 5, 5],
            'std': [1.5, 2, 2, 2, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Allergie Stagionali': {
            'mean': [0, 3, 8, 9, 1, 1, 1, 4, 4, 6],
            'std': [1.5, 2, 2, 2, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Emicrania': {
            'mean': [0, 1, 1, 1, 2, 2, 2, 8, 7, 8],
            'std': [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Gastroenterite': {
            'mean': [2, 2, 1, 1, 7, 6, 8, 5, 6, 5],
            'std': [1.5, 2, 1.5, 1.5, 2, 2, 2, 2, 2, 2]
        },
        'Cefalea Tensiva': {
            'mean': [0, 1, 1, 1, 1, 1, 1, 6, 5, 8],
            'std': [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 2, 2, 2]
        },
    }

    for idx, condition in enumerate(condizioni):
        start = idx * N_per_class
        end = (idx + 1) * N_per_class
        means = statistiche_condizioni[condition]['mean']
        stds = statistiche_condizioni[condition]['std']
        X_condition = np.random.normal(means, stds, (N_per_class, D))
        # Clamp severities back into the valid 0-10 range.
        X_condition = np.clip(X_condition, 0, 10)
        # Non-linear interaction features: headache x stress, fever x nausea.
        interaction_term = np.sin(X_condition[:, 7]) * np.log1p(X_condition[:, 9])
        interaction_term2 = X_condition[:, 0] * X_condition[:, 4]
        X_condition = np.hstack((X_condition,
                                 interaction_term.reshape(-1, 1),
                                 interaction_term2.reshape(-1, 1)))
        X[start:end] = X_condition
        y[start:end] = idx

    # Shuffle, standardize, and split (fixed seeds keep this reproducible).
    X, y = shuffle(X, y, random_state=42)
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(
        X_scaled, y, test_size=0.2, random_state=42
    )

    X_train_tensor = torch.from_numpy(X_train).float()
    y_train_tensor = torch.from_numpy(y_train).long()
    X_test_tensor = torch.from_numpy(X_test).float()
    y_test_tensor = torch.from_numpy(y_test).long()

    # Random-guess baseline: uniform class draws over the test set.
    random_preds = np.random.randint(num_classes, size=len(y_test))
    random_accuracy = (random_preds == y_test).mean()

    class LinearModel(nn.Module):
        """Single linear layer (multinomial-logistic-regression-style) classifier."""

        def __init__(self, input_dim, output_dim):
            super().__init__()
            self.linear = nn.Linear(input_dim, output_dim)

        def forward(self, x):
            return self.linear(x)

    class NeuralNet(nn.Module):
        """MLP classifier with BatchNorm and Dropout after each hidden layer."""

        def __init__(self, input_dim, hidden_dims, output_dim):
            super().__init__()
            layers = []
            in_dim = input_dim
            for h_dim in hidden_dims:
                layers += [nn.Linear(in_dim, h_dim),
                           nn.ReLU(),
                           nn.BatchNorm1d(h_dim),
                           nn.Dropout(0.5)]
                in_dim = h_dim
            layers.append(nn.Linear(in_dim, output_dim))
            self.model = nn.Sequential(*layers)

        def forward(self, x):
            return self.model(x)

    criterion = nn.CrossEntropyLoss()

    def _train(model, optimizer, num_epochs, log_every, tag):
        # Full-batch gradient descent; logs the loss every `log_every` epochs.
        for epoch in range(num_epochs):
            model.train()
            outputs = model(X_train_tensor)
            loss = criterion(outputs, y_train_tensor)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (epoch + 1) % log_every == 0:
                st.write('{} - Epoch [{}/{}], Loss: {:.4f}'.format(
                    tag, epoch + 1, num_epochs, loss.item()))

    def _test_accuracy(model):
        # Fraction of held-out samples whose argmax class matches the label.
        model.eval()
        with torch.no_grad():
            predicted = model(X_test_tensor).argmax(dim=1)
        return (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)

    input_dim = total_features
    output_dim = num_classes

    # ---- Linear baseline ----------------------------------------------------
    linear_model = LinearModel(input_dim, output_dim)
    _train(linear_model,
           optim.SGD(linear_model.parameters(), lr=0.01, weight_decay=1e-4),
           num_epochs=50, log_every=25, tag='Modello Lineare')
    linear_accuracy = _test_accuracy(linear_model)

    # ---- Neural network -----------------------------------------------------
    hidden_dims = [256, 128, 64]
    neural_model = NeuralNet(input_dim, hidden_dims, output_dim)
    _train(neural_model,
           optim.Adam(neural_model.parameters(), lr=0.001, weight_decay=1e-4),
           num_epochs=300, log_every=30, tag='Rete Neurale')
    neural_accuracy = _test_accuracy(neural_model)

    # ---- Summary ------------------------------------------------------------
    st.write("\nRiepilogo delle Accuratezze:....")
    st.error(f'Accuratezza Previsione Casuale: {random_accuracy * 100:.2f}%')
    st.warning(f'Accuratezza Modello Lineare: {linear_accuracy * 100:.2f}%')
    st.success(f'Accuratezza Rete Neurale: {neural_accuracy * 100:.2f}%')

    return linear_model, neural_model, scaler, condizioni, num_classes
def get_user_input_and_predict_disease(modello_lineare, modello_neurale, scaler, condizioni, num_classes):
    """Render symptom sliders and, on submit, show three predictions.

    Displays one slider (0-10 severity) per symptom inside a Streamlit form;
    after submission it shows a random-baseline prediction, the linear
    model's prediction, and the neural network's prediction.

    Args:
        modello_lineare: trained linear classifier returned by run_disease_train.
        modello_neurale: trained neural classifier returned by run_disease_train.
        scaler: the fitted StandardScaler used at training time.
        condizioni: list of condition names indexed by class id.
        num_classes: number of classes (used for the random baseline).
    """
    st.write("Regola i cursori per i seguenti sintomi su una scala da 0 (nessuno) a 10 (grave):")
    nomi_caratteristiche = ['Febbre', 'Tosse', 'Starnuti', 'Naso che Cola', 'Nausea', 'Vomito',
                            'Diarrea', 'Mal di Testa', 'Affaticamento', 'Livello di Stress']

    # Persist slider values across Streamlit reruns; default severity is 5.
    if 'user_features' not in st.session_state:
        st.session_state.user_features = [5] * len(nomi_caratteristiche)

    # Group sliders and submit button so the page only recomputes on submit.
    with st.form(key='symptom_form'):
        for i, caratteristica in enumerate(nomi_caratteristiche):
            st.session_state.user_features[i] = st.slider(
                caratteristica, 0, 10, st.session_state.user_features[i], key=f'slider_{i}'
            )
        submit_button = st.form_submit_button(label='Calcola Previsioni')

    if submit_button:
        st.session_state.form_submitted = True  # remember submission across reruns

    if st.session_state.get('form_submitted', False):
        user_features = st.session_state.user_features.copy()
        # Same engineered features as training: headache x stress, fever x nausea.
        termine_interazione = np.sin(user_features[7]) * np.log1p(user_features[9])
        termine_interazione2 = user_features[0] * user_features[4]
        user_features.extend([termine_interazione, termine_interazione2])
        # Standardize with the training-time scaler (expects a 2-D array).
        user_scaled = scaler.transform([user_features])
        user_tensor = torch.from_numpy(user_scaled).float()

        def _predict(model):
            # Argmax class index for the single standardized sample.
            model.eval()
            with torch.no_grad():
                return model(user_tensor).argmax(dim=1).item()

        # Random baseline prediction.
        previsione_casuale = np.random.randint(num_classes)
        st.error(f"\nPrevisione Casuale: {condizioni[previsione_casuale]}")
        # Linear model prediction.
        st.warning(f"Previsione Modello Lineare: {condizioni[_predict(modello_lineare)]}")
        # Neural network prediction.
        st.success(f"Previsione Rete Neurale: {condizioni[_predict(modello_neurale)]}")