# Source header (non-code residue from the hosting page, preserved as a comment):
# author: soumickmj — "v0 trial", commit 068b166 (raw / history / blame, 9.19 kB)
import streamlit as st
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Set random seeds for reproducibility across NumPy (data generation,
# random baseline) and PyTorch (weight init, dropout masks)
np.random.seed(42)
torch.manual_seed(42)
def run_disease_train():
    """Build a synthetic symptom dataset and train two classifiers on it.

    Draws normally-distributed symptom scores for five mock conditions,
    appends two hand-crafted non-linear interaction features, then trains
    (a) a softmax linear model and (b) a dropout/batch-norm MLP on the
    standardized data, logging training progress and final accuracies via
    Streamlit. A random-guess baseline is reported for comparison.

    Returns:
        tuple: (linear_model, neural_model, scaler, conditions, num_classes)
            linear_model: trained ``LinearModel`` (nn.Module).
            neural_model: trained ``NeuralNet`` MLP (nn.Module).
            scaler: ``StandardScaler`` fitted on the full feature matrix.
            conditions: list of class names, index-aligned with labels.
            num_classes: number of conditions.
    """
    # Number of samples per condition
    N_per_class = 500
    # List of conditions (classes)
    conditions = ['Common Cold', 'Seasonal Allergies', 'Migraine', 'Gastroenteritis', 'Tension Headache']
    # Total number of classes
    num_classes = len(conditions)
    # Total number of samples
    N = N_per_class * num_classes
    # Number of original features
    D = 10  # Number of symptoms/features
    # Original features plus the two engineered interaction terms below
    total_features = D + 2
    # Initialize feature matrix X and label vector y
    X = np.zeros((N, total_features))
    y = np.zeros(N, dtype=int)
    # Per-condition mean and standard deviation for each raw feature.
    # Features: [Fever, Cough, Sneezing, Runny Nose, Nausea, Vomiting, Diarrhea, Headache, Fatigue, Stress Level]
    condition_stats = {
        'Common Cold': {
            'mean': [1, 6, 7, 8, 1, 1, 1, 5, 5, 5],
            'std': [1.5, 2, 2, 2, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Seasonal Allergies': {
            'mean': [0, 3, 8, 9, 1, 1, 1, 4, 4, 6],
            'std': [1.5, 2, 2, 2, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Migraine': {
            'mean': [0, 1, 1, 1, 2, 2, 2, 8, 7, 8],
            'std': [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 2, 2, 2]
        },
        'Gastroenteritis': {
            'mean': [2, 2, 1, 1, 7, 6, 8, 5, 6, 5],
            'std': [1.5, 2, 1.5, 1.5, 2, 2, 2, 2, 2, 2]
        },
        'Tension Headache': {
            'mean': [0, 1, 1, 1, 1, 1, 1, 6, 5, 8],
            'std': [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 2, 2, 2]
        },
    }
    # Generate synthetic data for each condition, one contiguous slice per class
    for idx, condition in enumerate(conditions):
        start = idx * N_per_class
        end = (idx + 1) * N_per_class
        means = condition_stats[condition]['mean']
        stds = condition_stats[condition]['std']
        X_condition = np.random.normal(means, stds, (N_per_class, D))
        # Clip symptom scores to the 0-10 severity scale
        X_condition = np.clip(X_condition, 0, 10)
        # Introduce non-linear feature interactions
        interaction_term = np.sin(X_condition[:, 7]) * np.log1p(X_condition[:, 9])  # Headache and Stress Level
        interaction_term2 = X_condition[:, 0] * X_condition[:, 4]  # Fever * Nausea
        X_condition = np.hstack((X_condition, interaction_term.reshape(-1, 1), interaction_term2.reshape(-1, 1)))
        X[start:end] = X_condition
        y[start:end] = idx
    # Shuffle the dataset so classes are interleaved before splitting
    X, y = shuffle(X, y, random_state=42)
    # Normalize the features (fit on all data; see NOTE below)
    # NOTE(review): fitting the scaler before train_test_split leaks test
    # statistics into training — acceptable for a synthetic demo.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    # Split data into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(
        X_scaled, y, test_size=0.2, random_state=42
    )
    # Convert data to torch tensors
    X_train_tensor = torch.from_numpy(X_train).float()
    y_train_tensor = torch.from_numpy(y_train).long()
    X_test_tensor = torch.from_numpy(X_test).float()
    y_test_tensor = torch.from_numpy(y_test).long()

    def random_prediction(num_samples):
        """Uniform random class labels — the chance-level baseline."""
        return np.random.randint(num_classes, size=num_samples)

    # Random prediction and evaluation
    random_preds = random_prediction(len(y_test))
    random_accuracy = (random_preds == y_test).sum() / y_test.size

    class LinearModel(nn.Module):
        """Single affine layer producing per-class logits (softmax regression)."""

        def __init__(self, input_dim, output_dim):
            super(LinearModel, self).__init__()
            self.linear = nn.Linear(input_dim, output_dim)

        def forward(self, x):
            return self.linear(x)

    # Initialize Linear Model
    input_dim = total_features
    output_dim = num_classes
    linear_model = LinearModel(input_dim, output_dim)
    # Loss and optimizer for Linear Model (L2 regularization via weight_decay)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(linear_model.parameters(), lr=0.01, weight_decay=1e-4)
    # Training the Linear Model (full-batch gradient descent)
    num_epochs = 50
    for epoch in range(num_epochs):
        linear_model.train()
        outputs = linear_model(X_train_tensor)
        loss = criterion(outputs, y_train_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            st.write('Linear Model - Epoch [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, loss.item()))
    # Evaluate Linear Model
    linear_model.eval()
    with torch.no_grad():
        outputs = linear_model(X_test_tensor)
        _, predicted = torch.max(outputs.data, 1)
        linear_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)

    class NeuralNet(nn.Module):
        """MLP with ReLU, batch-norm and dropout after each hidden layer."""

        def __init__(self, input_dim, hidden_dims, output_dim):
            super(NeuralNet, self).__init__()
            layers = []
            in_dim = input_dim
            for h_dim in hidden_dims:
                layers.append(nn.Linear(in_dim, h_dim))
                layers.append(nn.ReLU())
                layers.append(nn.BatchNorm1d(h_dim))
                layers.append(nn.Dropout(0.5))
                in_dim = h_dim
            layers.append(nn.Linear(in_dim, output_dim))
            self.model = nn.Sequential(*layers)

        def forward(self, x):
            return self.model(x)

    # Initialize Neural Network Model
    hidden_dims = [256, 128, 64]
    neural_model = NeuralNet(input_dim, hidden_dims, output_dim)
    # Loss and optimizer for Neural Network Model
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(neural_model.parameters(), lr=0.001, weight_decay=1e-4)
    # Training the Neural Network Model (full-batch)
    num_epochs = 300
    for epoch in range(num_epochs):
        neural_model.train()
        outputs = neural_model(X_train_tensor)
        loss = criterion(outputs, y_train_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 30 == 0:
            st.write('Neural Network - Epoch [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, loss.item()))
    # Evaluate Neural Network Model
    neural_model.eval()
    with torch.no_grad():
        outputs = neural_model(X_test_tensor)
        _, predicted = torch.max(outputs.data, 1)
        neural_accuracy = (predicted == y_test_tensor).sum().item() / y_test_tensor.size(0)
    # Summary of Accuracies
    st.write("\nSummary of Accuracies:....")
    st.write(f'Random Prediction Accuracy: {random_accuracy * 100:.2f}%')
    st.write(f'Linear Model Accuracy: {linear_accuracy * 100:.2f}%')
    st.write(f'Neural Network Model Accuracy: {neural_accuracy * 100:.2f}%')
    return linear_model, neural_model, scaler, conditions, num_classes
# Function to get user input and make predictions
def get_user_input_and_predict(linear_model, neural_model, scaler, conditions, num_classes):
    """Collect symptom scores via Streamlit sliders and display predictions.

    Rebuilds the same two interaction features used at training time,
    standardizes the input with the fitted scaler, and shows a random
    baseline plus the linear- and neural-model predictions.

    Args:
        linear_model: trained linear classifier (nn.Module).
        neural_model: trained MLP classifier (nn.Module).
        scaler: StandardScaler fitted on the training feature matrix.
        conditions: list of class names indexed by label.
        num_classes: number of classes, used for the random baseline.
    """
    st.write("\nAdjust the sliders for the following symptoms on a scale from 0 (none) to 10 (severe):")
    # Feature names — order must match the training feature layout
    feature_names = ['Fever', 'Cough', 'Sneezing', 'Runny Nose', 'Nausea', 'Vomiting',
                     'Diarrhea', 'Headache', 'Fatigue', 'Stress Level']
    # Create sliders for user input
    user_features = []
    for feature in feature_names:
        value = st.slider(feature, 0, 10, 5)  # Default value set to 5
        user_features.append(value)
    # Calculate the same interaction terms that were engineered at training time
    interaction_term = np.sin(user_features[7]) * np.log1p(user_features[9])  # Headache and Stress Level
    interaction_term2 = user_features[0] * user_features[4]  # Fever * Nausea
    user_features.extend([interaction_term, interaction_term2])
    # Normalize features with the training-time scaler (2-D input expected)
    user_features = scaler.transform([user_features])
    user_tensor = torch.from_numpy(user_features).float()
    # Random baseline prediction
    random_pred = np.random.randint(num_classes)
    st.write(f"\nRandom Prediction: {conditions[random_pred]}")
    # Linear Model Prediction
    linear_model.eval()
    with torch.no_grad():
        outputs = linear_model(user_tensor)
        _, predicted = torch.max(outputs.data, 1)
        linear_pred = predicted.item()
    st.write(f"Linear Model Prediction: {conditions[linear_pred]}")
    # Neural Network Prediction
    neural_model.eval()
    with torch.no_grad():
        outputs = neural_model(user_tensor)
        _, predicted = torch.max(outputs.data, 1)
        neural_pred = predicted.item()
    st.write(f"Neural Network Prediction: {conditions[neural_pred]}")
# Script entry: train both models, then render the interactive prediction UI.
# NOTE(review): Streamlit re-executes the whole script on every widget
# interaction, so this retrains both models each time a slider moves —
# consider caching (e.g. st.cache_resource) to train once per session.
linear_model, neural_model, scaler, conditions, num_classes = run_disease_train()
get_user_input_and_predict(linear_model, neural_model, scaler, conditions, num_classes)