Ci-Dave committed
Commit 7c2913f · 1 Parent(s): 58774fc

Added files

Files changed (4)
  1. .gitignore +1 -0
  2. app.py +156 -0
  3. breast_cancer.csv +0 -0
  4. requirements.txt +6 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ .venv/
app.py ADDED
@@ -0,0 +1,156 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+
+ # Import sklearn tools
+ from sklearn.datasets import load_breast_cancer
+ from sklearn.model_selection import train_test_split
+ from sklearn.preprocessing import StandardScaler
+ from sklearn.linear_model import LogisticRegression
+ from sklearn.svm import SVC
+ from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
+ from sklearn.neighbors import KNeighborsClassifier
+ from sklearn.neural_network import MLPClassifier
+ from sklearn.metrics import confusion_matrix, classification_report
+
+ # Set up page configuration and title
+ st.set_page_config(page_title="Breast Cancer Classification App", layout="wide")
+ st.title("Breast Cancer Classification Analysis")
+
+ # Display a header image (ensure you have this image file)
+ # st.image("breast_cancer_banner.jpg", caption="Breast Cancer Analysis", use_column_width=True)
+
+ # About the app
+ with st.expander("About this App"):
+     st.markdown("""
+     **Overview:** This application demonstrates classification of the Breast Cancer dataset using several machine learning models.
+
+     **Models included:**
+     - Logistic Regression
+     - Support Vector Machine (SVM)
+     - Random Forest
+     - Gradient Boosting
+     - K-Nearest Neighbors (KNN)
+     - MLP Neural Network
+
+     **Features:**
+     - Data preprocessing and scaling
+     - Visualization of confusion matrices, performance reports, and detailed result discussions
+     - Interactive model selection and performance comparison
+     """)
+
+ # Load the Breast Cancer dataset
+ data = load_breast_cancer()
+ df = pd.DataFrame(data.data, columns=data.feature_names)
+ df['target'] = data.target
+
+ # Display the raw dataset
+ st.subheader("Dataset Overview")
+ st.write(df.head())
+
+ # Split data and preprocess
+ X = df.drop("target", axis=1)
+ y = df["target"]
+
+ # Scale features
+ scaler = StandardScaler()
+ X_scaled = scaler.fit_transform(X)
+
+ # Sidebar: Allow the user to select test set size
+ test_size = st.sidebar.slider("Test Set Size", 0.1, 0.5, 0.2, step=0.05)
+ X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=test_size, random_state=42)
+
+ # Dictionary of models
+ models = {
+     "Logistic Regression": LogisticRegression(max_iter=10000),
+     "SVM": SVC(kernel='linear'),
+     "Random Forest": RandomForestClassifier(n_estimators=100),
+     "Gradient Boosting": GradientBoostingClassifier(),
+     "KNN": KNeighborsClassifier(),
+     "MLP Neural Network": MLPClassifier(max_iter=500)
+ }
+
+ # Sidebar: Model selection
+ model_choice = st.sidebar.selectbox("Choose a model", list(models.keys()))
+ selected_model = models[model_choice]
+
+ # Train the selected model
+ with st.spinner("Training model..."):
+     selected_model.fit(X_train, y_train)
+     y_pred = selected_model.predict(X_test)
+
+ # Mapping labels for readability
+ label_mapping = {0: "malignant", 1: "benign"}
+ y_test_labels = [label_mapping[label] for label in y_test]
+ y_pred_labels = [label_mapping[label] for label in y_pred]
+
+ # Evaluate model performance
+ cm = confusion_matrix(y_test_labels, y_pred_labels, labels=["malignant", "benign"])
+ cr = classification_report(y_test_labels, y_pred_labels, output_dict=True)
+ # Display the confusion matrix with a smaller figure size
+ st.subheader(f"Confusion Matrix: {model_choice}")
+ fig, ax = plt.subplots(figsize=(4, 3))
+ sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", ax=ax,
+             xticklabels=["malignant", "benign"], yticklabels=["malignant", "benign"])
+ ax.set_xlabel("Predicted")
+ ax.set_ylabel("True")
+ plt.tight_layout()  # Adjusts the layout to fit within the figure area
+ st.pyplot(fig)
+
+
+ # Display classification report
+ st.subheader(f"Classification Report: {model_choice}")
+ cr_df = pd.DataFrame(cr).transpose()
+ st.dataframe(cr_df)
+
+ # Result and Discussion section
+ st.subheader("Result and Discussion")
+ if model_choice == "Logistic Regression":
+     st.markdown("""
+     **Logistic Regression Discussion:**
+     - **Performance:** The model shows robust performance with clear separation between classes.
+     - **Strengths:** It is fast, interpretable, and performs well on linearly separable data.
+     - **Weaknesses:** May underperform on non-linear boundaries and can be sensitive to outliers.
+     """)
+ elif model_choice == "SVM":
+     st.markdown("""
+     **SVM Discussion:**
+     - **Performance:** The linear SVM performs well for this dataset, handling high-dimensional data efficiently.
+     - **Strengths:** Effective in cases where the number of features is greater than the number of samples.
+     - **Weaknesses:** Tuning parameters (like the kernel) is crucial and can be computationally expensive.
+     """)
+ elif model_choice == "Random Forest":
+     st.markdown("""
+     **Random Forest Discussion:**
+     - **Performance:** Typically provides high accuracy and robust results due to ensemble learning.
+     - **Strengths:** Handles non-linearity well and provides insights via feature importance.
+     - **Weaknesses:** Can be less interpretable and may overfit if the trees are not properly tuned.
+     """)
+ elif model_choice == "Gradient Boosting":
+     st.markdown("""
+     **Gradient Boosting Discussion:**
+     - **Performance:** Demonstrates strong performance by sequentially improving on previous errors.
+     - **Strengths:** Excellent for handling complex data patterns.
+     - **Weaknesses:** Sensitive to overfitting if hyperparameters are not carefully optimized.
+     """)
+ elif model_choice == "KNN":
+     st.markdown("""
+     **KNN Discussion:**
+     - **Performance:** Simple yet effective for this dataset, though performance depends on the choice of 'k'.
+     - **Strengths:** Easy to implement and understand.
+     - **Weaknesses:** Computationally expensive for large datasets and sensitive to feature scaling.
+     """)
+ elif model_choice == "MLP Neural Network":
+     st.markdown("""
+     **MLP Neural Network Discussion:**
+     - **Performance:** Provides competitive accuracy with a flexible model that can capture non-linear relationships.
+     - **Strengths:** Can learn complex patterns with enough training data.
+     - **Weaknesses:** Requires careful tuning of hyperparameters and more computational resources compared to simpler models.
+     """)
+ else:
+     st.markdown("No discussion available for the selected model.")
+
+ # Optionally, provide a download button for the classification report
+ st.download_button("Download Classification Report as CSV", cr_df.to_csv().encode('utf-8'), "classification_report.csv", "text/csv")
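The About expander lists "performance comparison" as a feature, while app.py itself evaluates only the model chosen in the sidebar. A minimal sketch of how all entries in the models dictionary could be compared on the same held-out split, reusing the names defined in app.py; the only extra piece assumed here is an accuracy_score import from sklearn.metrics:

    # Sketch only (not part of this commit): compare every model on the shared test split.
    from sklearn.metrics import accuracy_score

    scores = {}
    for name, model in models.items():
        model.fit(X_train, y_train)                                    # train on the same split as above
        scores[name] = accuracy_score(y_test, model.predict(X_test))   # held-out accuracy per model

    st.subheader("Model Comparison (Test Accuracy)")
    st.bar_chart(pd.Series(scores, name="accuracy"))                   # simple side-by-side chart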
breast_cancer.csv ADDED
The diff for this file is too large to render.
 
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ streamlit
+ pandas
+ numpy
+ matplotlib
+ seaborn
+ scikit-learn
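With these dependencies installed (for example, pip install -r requirements.txt inside the .venv environment that .gitignore excludes), the app would typically be launched with streamlit run app.py.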