Update app.py
app.py CHANGED
@@ -29,6 +29,8 @@ import time
 from sklearn.cluster import KMeans
 import scipy.stats as stats
 import mimetypes
+import matplotlib.pyplot as plt
+from sklearn.model_selection import learning_curve
 
 # Configurations
 st.set_page_config(page_title="Executive Insights Pro", layout="wide", page_icon="📈")
@@ -737,6 +739,9 @@ elif app_mode == "Model Training":
     from sklearn.metrics import confusion_matrix, roc_curve, auc, precision_recall_curve, classification_report  # Import here to avoid library bloat
     import seaborn as sns
     import matplotlib.pyplot as plt  # Added import statement
+    import numpy as np
+    import pandas as pd
+    from sklearn.model_selection import learning_curve, validation_curve
 
     # Weighted averaging for metrics for multiclass
     average_method = "weighted"  # changed from None
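Of the new imports, only learning_curve is exercised in the hunks below; validation_curve is brought in but not yet called. For reference, a minimal standalone sketch of what validation_curve does, sweeping a single hyperparameter with cross-validation (the synthetic data, RandomForestClassifier, and n_estimators range are assumptions for illustration, not part of this commit):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import validation_curve

# Synthetic stand-in for the app's cleaned data.
X, y = make_classification(n_samples=500, n_features=10, random_state=0)

# Score the model over a range of values for one hyperparameter,
# with 5-fold cross-validation at each value.
param_range = [10, 50, 100, 200]
train_scores, valid_scores = validation_curve(
    RandomForestClassifier(random_state=0), X, y,
    param_name="n_estimators", param_range=param_range,
    cv=5, scoring="accuracy",
)

# One mean score per parameter value, for training folds and validation folds.
print(np.mean(train_scores, axis=1))
print(np.mean(valid_scores, axis=1))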
@@ -766,21 +771,51 @@ elif app_mode == "Model Training":
         st.pyplot(fig_conf)
 
 
-
-
-            importances = model.feature_importances_  # Assumed tree-based model
-            feat_importances = pd.Series(importances, index=X_train.columns)
-            feat_importances = feat_importances.nlargest(20)
+        # Added section for model visualization
+        st.subheader("Model Visualization")
 
-
-
-
-
-
-
+        if problem_type == "Classification":
+
+            try:  # All the plotting code goes here.
+                # Added code that calculates the learning curves
+                train_sizes, train_scores, valid_scores = learning_curve(model, X_train_selected, y_train, cv=5, scoring='accuracy')
+
+                # Then plot the learning curve and display it with st.pyplot
+                train_mean = np.mean(train_scores, axis=1)
+                train_std = np.std(train_scores, axis=1)
+                valid_mean = np.mean(valid_scores, axis=1)
+                valid_std = np.std(valid_scores, axis=1)
+
+                fig_lc, ax_lc = plt.subplots()  # Plot the curve with matplotlib
+
+
+                ax_lc.plot(train_sizes, train_mean, color='blue', marker='o', markersize=5, label='Training Accuracy')
+                ax_lc.fill_between(train_sizes, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue')
+                ax_lc.plot(train_sizes, valid_mean, color='green', linestyle='--', marker='s', markersize=5, label='Validation Accuracy')
+                ax_lc.fill_between(train_sizes, valid_mean + valid_std, valid_mean - valid_std, alpha=0.15, color='green')
+
+                ax_lc.set_title('Learning Curves')
+                ax_lc.set_xlabel('Training Set Size')
+                ax_lc.set_ylabel('Accuracy')
+                ax_lc.legend(loc='best')
+                st.pyplot(fig_lc)  # Display the figure in Streamlit
+
+
+                # Feature Importance (tree-based models)
+                if model_name in ["Random Forest", "Gradient Boosting"]:  # Make sure it is a model type that exposes feature importances
+                    importances = model.feature_importances_  # Assumes a tree-based model
+                    feat_importances = pd.Series(importances, index=X_train.columns)
+                    feat_importances = feat_importances.nlargest(20)
+
+                    fig_feat, ax_feat = plt.subplots()
+                    feat_importances.plot(kind='barh', ax=ax_feat)
+                    ax_feat.set_xlabel('Relative Importance')
+                    ax_feat.set_ylabel('Features')
+                    ax_feat.set_title('Feature Importances')
+                    st.pyplot(fig_feat)
+            except Exception as e:  # Local error handling
+                st.write(f"Plotting requires a tree-based model and a classification problem: {e}")
 
-        except Exception as e:
-            st.error(f"An error occurred: {e}")
     else:
         st.write("Please upload and clean data first.")
 
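For context on the learning_curve call above: it refits the estimator on progressively larger subsets of the training data and cross-validates each fit, returning one row of scores per training-set size. A minimal standalone sketch under assumed synthetic data and a LogisticRegression estimator (neither is part of the app):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import learning_curve

# Synthetic stand-in for X_train_selected / y_train.
X, y = make_classification(n_samples=1000, n_features=20, random_state=0)

# Evaluate the estimator at five training-set sizes, with 5-fold CV at each size.
train_sizes, train_scores, valid_scores = learning_curve(
    LogisticRegression(max_iter=1000), X, y,
    train_sizes=np.linspace(0.1, 1.0, 5), cv=5, scoring="accuracy",
)

# Scores have shape (n_sizes, n_folds); averaging over axis=1 gives the
# curves that the app then plots with matplotlib and st.pyplot.
print(train_sizes)
print(train_scores.mean(axis=1))
print(valid_scores.mean(axis=1))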
@@ -803,7 +838,7 @@ elif app_mode == "Model Training":
 
     # Model Evaluation Section
     if 'X_test' in locals() and st.session_state.model is not None:
-        try:
+        try:  # Error catching with new test data
             y_pred = st.session_state.model.predict(X_test)
 
             if problem_type == "Regression":
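The guard above combines two checks: 'X_test' in locals() confirms a train/test split exists in the current script run, while st.session_state.model reuses a model fitted during an earlier rerun, since Streamlit re-executes the whole script on every interaction. A minimal sketch of that pattern with illustrative synthetic data (the button label and classifier choice are assumptions, not taken from the app):

import streamlit as st
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Synthetic stand-in for the app's cleaned dataframe.
X, y = make_classification(n_samples=300, n_features=8, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# st.session_state survives reruns, so a model fitted on one button click
# is still available after later widget interactions.
if "model" not in st.session_state:
    st.session_state.model = None

if st.button("Train model"):
    st.session_state.model = RandomForestClassifier().fit(X_train, y_train)

# Evaluate only when a split exists in this run and a model has been stored.
if "X_test" in locals() and st.session_state.model is not None:
    y_pred = st.session_state.model.predict(X_test)
    st.write(f"Accuracy: {(y_pred == y_test).mean():.4f}")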
@@ -812,11 +847,12 @@ elif app_mode == "Model Training":
                 st.write(f"Mean Squared Error: {mse:.4f}")
                 st.write(f"R-squared: {r2:.4f}")
             else:
-
-
-
-
-
+                from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, auc, precision_recall_curve, classification_report  # Import here to avoid library bloat
+                accuracy = accuracy_score(y_test, y_pred)
+                st.write(f"Accuracy: {accuracy:.4f}")
+
+        except Exception as e:  # Local error handling
+            st.error(f"An error occurred during model evaluation: {e}")
 
 elif app_mode == "Predictions":
     st.title("🔮 Make Predictions")
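The classification branch reports accuracy only, although confusion_matrix, classification_report, and the curve helpers are imported alongside it. A minimal sketch of how those imports could be applied to the same y_test / y_pred pair (the hard-coded labels below are placeholders, not app data):

import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix

# Placeholder labels; in the app these would be y_test and the model's predictions.
y_test = np.array([0, 1, 1, 0, 1, 0, 1, 1])
y_pred = np.array([0, 1, 0, 0, 1, 1, 1, 1])

# Per-class precision/recall/F1 as a table; st.dataframe could display it.
report = classification_report(y_test, y_pred, output_dict=True)
print(pd.DataFrame(report).transpose())

# Confusion matrix: rows are true labels, columns are predicted labels.
print(confusion_matrix(y_test, y_pred))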