CosmickVisions committed
Commit ca88a31 · verified · 1 Parent(s): b6aa250

Update app.py

Files changed (1)
  1. app.py +7 -14
app.py CHANGED
@@ -614,10 +614,10 @@ elif app_mode == "Model Training":
  min_features = 1 # Ensure at least one feature is used
  max_features = len(feature_columns) if len(feature_columns) > 0 else 1 # Use 1 if no features are selected
  param_grid = {
- 'n_estimators': list(range(st.slider("Number of Estimators", 10, 200, 100, help="Number of trees in the forest."),(st.slider("Number of Estimators", 10, 200, 100, help="Number of trees in the forest.")+1))),
- 'max_depth': list(range(st.slider("Max Depth", 3, 20, 10, help="Maximum depth of the tree."),(st.slider("Max Depth", 3, 20, 10, help="Maximum depth of the tree.")+1))),
- 'min_samples_split': list(range(st.slider("Minimum Samples Split", 2, 10, 2, help="Minimum samples required to split an internal node"),(st.slider("Minimum Samples Split", 2, 10, 2, help="Minimum samples required to split an internal node")+1))), #New hyperparameter
- 'min_samples_leaf': list(range(st.slider("Minimum Samples Leaf", 1, 10, 1, help="Minimum samples required to be at a leaf node"),(st.slider("Minimum Samples Leaf", 1, 10, 1, help="Minimum samples required to be at a leaf node")+1))), #New hyperparameter
+ 'n_estimators': list(range(st.slider("Number of Estimators", 10, 200, 100, help="Number of trees in the forest.", key="n_estimators1"),(st.slider("Number of Estimators", 10, 200, 100, help="Number of trees in the forest.", key="n_estimators2")+1))),
+ 'max_depth': list(range(st.slider("Max Depth", 3, 20, 10, help="Maximum depth of the tree.", key="max_depth1"),(st.slider("Max Depth", 3, 20, 10, help="Maximum depth of the tree.", key="max_depth2")+1))),
+ 'min_samples_split': list(range(st.slider("Minimum Samples Split", 2, 10, 2, help="Minimum samples required to split an internal node", key="min_samples_split1"),(st.slider("Minimum Samples Split", 2, 10, 2, help="Minimum samples required to split an internal node", key="min_samples_split2")+1))), #New hyperparameter
+ 'min_samples_leaf': list(range(st.slider("Minimum Samples Leaf", 1, 10, 1, help="Minimum samples required to be at a leaf node", key="min_samples_leaf1"),(st.slider("Minimum Samples Leaf", 1, 10, 1, help="Minimum samples required to be at a leaf node", key="min_samples_leaf2")+1))), #New hyperparameter
  }
 
  # Train-Test Split
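
Note on this hunk: each hyperparameter now calls st.slider twice with distinct keys (e.g. n_estimators1/n_estimators2), which sidesteps Streamlit's DuplicateWidgetID error but renders two identical sliders per parameter. A minimal sketch of a single-widget alternative, assuming the same grid-search usage downstream (variable names below are illustrative, not taken from app.py):

import streamlit as st

# One slider per hyperparameter; each chosen value is wrapped in a
# one-element list so the dict still works as a GridSearchCV param_grid.
n_estimators = st.slider("Number of Estimators", 10, 200, 100,
                         help="Number of trees in the forest.", key="n_estimators")
max_depth = st.slider("Max Depth", 3, 20, 10,
                      help="Maximum depth of the tree.", key="max_depth")
min_samples_split = st.slider("Minimum Samples Split", 2, 10, 2, key="min_samples_split")
min_samples_leaf = st.slider("Minimum Samples Leaf", 1, 10, 1, key="min_samples_leaf")

param_grid = {
    'n_estimators': [n_estimators],
    'max_depth': [max_depth],
    'min_samples_split': [min_samples_split],
    'min_samples_leaf': [min_samples_leaf],
}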
@@ -661,7 +661,7 @@ elif app_mode == "Model Training":
 
  #Feature Selection
  if feature_selection_method == "SelectKBest":
- k = st.slider("Number of Features to Select", 1, len(feature_columns), len(feature_columns))
+ k = st.slider("Number of Features to Select", 1, len(feature_columns), len(feature_columns), key="featureselector")
  selector = SelectKBest(k=k)
  X_train_selected = selector.fit_transform(X_train_processed, y_train)
  X_test_selected = selector.transform(X_test_processed)
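
For reference, SelectKBest(k=k) as used here falls back to its default score function, f_classif, which presumes a classification target; a regression run would normally pass f_regression instead. A small self-contained sketch with synthetic stand-ins for X_train_processed and y_train:

import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif, f_regression

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 8))             # stand-in for X_train_processed
y = (X[:, 0] + X[:, 3] > 0).astype(int)   # stand-in classification target

# Classification: ANOVA F-test scores (SelectKBest's default score_func)
selector = SelectKBest(score_func=f_classif, k=3)
X_selected = selector.fit_transform(X, y)
print(X_selected.shape)                    # (100, 3)
print(selector.get_support(indices=True))  # indices of the kept columns

# For a regression target, f_regression would be the usual choice:
# selector = SelectKBest(score_func=f_regression, k=3)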
@@ -762,7 +762,7 @@ elif app_mode == "Model Training":
 
  #Heatmap
  fig_conf, ax_conf = plt.subplots()
- sns.heatmap(conf_matrix, ax=ax_conf, annot=True, fmt='d', cmap='Blues')
+ sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', ax=ax_conf)
  ax_conf.set_xlabel('Predicted Labels')
  ax_conf.set_ylabel('True Labels')
  ax_conf.set_title('Confusion Matrix')
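
The heatmap call only draws onto ax_conf; in Streamlit the figure still has to be passed to st.pyplot to appear on the page, presumably in unchanged lines below this hunk. A self-contained sketch of the full pattern, with placeholder labels instead of the app's y_test/y_pred:

import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
from sklearn.metrics import confusion_matrix

y_test = [0, 1, 1, 0, 1, 0, 1, 1]   # placeholder true labels
y_pred = [0, 1, 0, 0, 1, 0, 1, 1]   # placeholder predicted labels

conf_matrix = confusion_matrix(y_test, y_pred)

fig_conf, ax_conf = plt.subplots()
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', ax=ax_conf)
ax_conf.set_xlabel('Predicted Labels')
ax_conf.set_ylabel('True Labels')
ax_conf.set_title('Confusion Matrix')
st.pyplot(fig_conf)  # render the Matplotlib figure in the Streamlit page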
@@ -790,17 +790,12 @@ elif app_mode == "Model Training":
  #Create data that determines the learning and validation curve and what we have to add
  train_sizes, train_scores, valid_scores = learning_curve(model, X_train_selected, y_train, cv=5, scoring='accuracy' if problem_type =="Classification" else 'neg_mean_squared_error', n_jobs=-1) #Define cross validation for run
 
-
- #Take and define what this is for from the results that has been generated
+ #Then add a plot for the learning curve and use st.pyplot
  train_mean = np.mean(train_scores, axis=1)
  train_std = np.std(train_scores, axis=1)
  valid_mean = np.mean(valid_scores, axis=1)
  valid_std = np.std(valid_scores, axis=1)
 
-
- #Plot each of the variables that has to be used.
-
-
  fig_lc, ax_lc = plt.subplots()
  ax_lc.plot(train_sizes, train_mean, color='blue', marker='o', markersize=5, label='Training ' + ('Accuracy' if problem_type == "Classification" else "Neg MSE"))
  ax_lc.fill_between(train_sizes, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue')
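
The hunk ends at the training band; per the added comment, the validation curve and the st.pyplot call are expected to follow in unchanged lines. A self-contained sketch of the complete plot, using synthetic data in place of the app's model and selected features (all names here are illustrative):

import matplotlib.pyplot as plt
import streamlit as st
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import learning_curve

# Synthetic stand-ins for the app's model, X_train_selected, and y_train
X, y = make_classification(n_samples=300, n_features=8, random_state=0)
model = RandomForestClassifier(n_estimators=50, random_state=0)

train_sizes, train_scores, valid_scores = learning_curve(
    model, X, y, cv=5, scoring='accuracy', n_jobs=-1)

train_mean, train_std = train_scores.mean(axis=1), train_scores.std(axis=1)
valid_mean, valid_std = valid_scores.mean(axis=1), valid_scores.std(axis=1)

fig_lc, ax_lc = plt.subplots()
# Training curve with a shaded ±1 std band
ax_lc.plot(train_sizes, train_mean, color='blue', marker='o', markersize=5, label='Training Accuracy')
ax_lc.fill_between(train_sizes, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue')
# Validation curve, which the hunk above stops short of
ax_lc.plot(train_sizes, valid_mean, color='green', linestyle='--', marker='s', markersize=5, label='Validation Accuracy')
ax_lc.fill_between(train_sizes, valid_mean + valid_std, valid_mean - valid_std, alpha=0.15, color='green')
ax_lc.set_xlabel('Number of Training Samples')
ax_lc.set_ylabel('Accuracy')
ax_lc.set_title('Learning Curve')
ax_lc.legend(loc='lower right')
st.pyplot(fig_lc)  # render the learning-curve figure in the Streamlit page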
@@ -818,7 +813,6 @@ elif app_mode == "Model Training":
 
  except Exception as e:
  st.error(f"An error occurred: {e}")
-
  else:
  st.write("Please upload and clean data first.")
 
@@ -856,7 +850,6 @@ elif app_mode == "Model Training":
  st.write(f"Accuracy: {accuracy:.4f}")
  except Exception as e: #local error
  st.error(f"An error occurred during model evaluation: {e}")
-
 elif app_mode == "Predictions":
  st.title("🔮 Make Predictions")
 
 