import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
    accuracy_score, precision_score, recall_score,
    f1_score, roc_auc_score, confusion_matrix,
)
import plotly.express as px
import plotly.graph_objects as go


def load_data():
    """Load the exported train/test splits and separate features from the target."""
    data = pd.read_csv('exported_named_train_good.csv')
    data_test = pd.read_csv('exported_named_test_good.csv')
    X_train = data.drop('Target', axis=1)
    y_train = data['Target']
    X_test = data_test.drop('Target', axis=1)
    y_test = data_test['Target']
    return X_train, y_train, X_test, y_test, X_train.columns


def train_models(X_train, y_train, X_test, y_test):
    """Fit the four classifiers and collect train/test metrics for each."""
    models = {
        "Logistic Regression": LogisticRegression(random_state=42, max_iter=1000),
        "Decision Tree": DecisionTreeClassifier(random_state=42),
        "Random Forest": RandomForestClassifier(random_state=42),
        "Gradient Boost": GradientBoostingClassifier(random_state=42),
    }

    results = {}
    for name, model in models.items():
        model.fit(X_train, y_train)

        # Predictions: hard labels for most metrics, probabilities for ROC AUC
        y_train_pred = model.predict(X_train)
        y_test_pred = model.predict(X_test)
        y_train_proba = model.predict_proba(X_train)[:, 1]
        y_test_proba = model.predict_proba(X_test)[:, 1]

        # Metrics (ROC AUC is computed from probability scores, not hard labels)
        results[name] = {
            'model': model,
            'train_metrics': {
                'accuracy': accuracy_score(y_train, y_train_pred),
                'f1': f1_score(y_train, y_train_pred, average='weighted'),
                'precision': precision_score(y_train, y_train_pred),
                'recall': recall_score(y_train, y_train_pred),
                'roc_auc': roc_auc_score(y_train, y_train_proba)
            },
            'test_metrics': {
                'accuracy': accuracy_score(y_test, y_test_pred),
                'f1': f1_score(y_test, y_test_pred, average='weighted'),
                'precision': precision_score(y_test, y_test_pred),
                'recall': recall_score(y_test, y_test_pred),
                'roc_auc': roc_auc_score(y_test, y_test_proba)
            }
        }
    return results


def plot_model_performance(results):
    """Bar charts of all metrics: one panel for training, one for test."""
    metrics = ['accuracy', 'f1', 'precision', 'recall', 'roc_auc']
    fig, axes = plt.subplots(1, 2, figsize=(15, 6))

    # Training metrics
    train_data = {model: [results[model]['train_metrics'][metric] for metric in metrics]
                  for model in results.keys()}
    train_df = pd.DataFrame(train_data, index=metrics)
    train_df.plot(kind='bar', ax=axes[0], title='Training Performance')
    axes[0].set_ylim(0, 1)

    # Test metrics
    test_data = {model: [results[model]['test_metrics'][metric] for metric in metrics]
                 for model in results.keys()}
    test_df = pd.DataFrame(test_data, index=metrics)
    test_df.plot(kind='bar', ax=axes[1], title='Test Performance')
    axes[1].set_ylim(0, 1)

    plt.tight_layout()
    return fig


def plot_feature_importance(model, feature_names, model_type):
    """Horizontal bar chart of feature importances (|coefficients| for logistic regression)."""
    plt.figure(figsize=(10, 6))
    if model_type in ["Decision Tree", "Random Forest", "Gradient Boost"]:
        importance = model.feature_importances_
    elif model_type == "Logistic Regression":
        importance = np.abs(model.coef_[0])
    else:
        raise ValueError(f"Unknown model type: {model_type}")

    importance_df = pd.DataFrame({
        'feature': feature_names,
        'importance': importance
    }).sort_values('importance', ascending=True)

    plt.barh(importance_df['feature'], importance_df['importance'])
    plt.title(f"Feature Importance - {model_type}")
    return plt.gcf()
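
# Usage sketch (not from the original script): a quick way to exercise the
# helpers above outside Streamlit, assuming the two exported CSVs sit in the
# working directory. The name run_quick_evaluation is illustrative.
def run_quick_evaluation():
    X_train, y_train, X_test, y_test, feature_names = load_data()
    results = train_models(X_train, y_train, X_test, y_test)
    for name, res in results.items():
        print(f"{name}: test accuracy = {res['test_metrics']['accuracy']:.3f}, "
              f"test ROC AUC = {res['test_metrics']['roc_auc']:.3f}")
    return results, feature_names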

# Page configuration
st.set_page_config(layout="wide", page_title="ML Dashboard")

# Custom style (the original CSS was omitted here; left as an empty placeholder)
st.markdown("""
""", unsafe_allow_html=True)


def plot_performance_comparison(results, metric='test_metrics'):
    """Build a grouped bar chart comparing model performance, one color per model."""
    metrics = ['accuracy', 'f1', 'recall', 'roc_auc']
    model_names = list(results.keys())

    # Distinct colors for each model
    colors = ['#FF6B6B', '#4ECDC4', '#45B7D1', '#96CEB4']

    data = {model: [results[model][metric][m] for m in metrics]
            for model in model_names}

    fig, ax = plt.subplots(figsize=(10, 6))
    x = np.arange(len(metrics))
    width = 0.2

    for i, (model, values) in enumerate(data.items()):
        ax.bar(x + i * width, values, width, label=model, color=colors[i])

    ax.set_ylabel('Score')
    ax.set_title(f'Performance comparison ({metric.split("_")[0].title()})')
    ax.set_xticks(x + width * (len(model_names) - 1) / 2)
    ax.set_xticklabels(metrics)
    ax.legend()
    ax.grid(True, alpha=0.3)
    plt.ylim(0, 1)
    return fig


def create_metric_card(title, value):
    """Render a styled metric card. The original markup was truncated; the
    div below is a minimal reconstruction, not the author's exact HTML."""
    st.markdown(f"""
    <div class="metric-card">
        <h4>{title}</h4>
        <p>{value:.3f}</p>
    </div>
    """, unsafe_allow_html=True)
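
# App wiring (a hedged sketch, not the author's exact layout): how the pieces
# above could be assembled into the dashboard. The sidebar selectbox, column
# layout, and main() entry point are assumptions for illustration.
def main():
    st.title("ML Dashboard")

    X_train, y_train, X_test, y_test, feature_names = load_data()
    results = train_models(X_train, y_train, X_test, y_test)

    # Headline test metrics for one model chosen in the sidebar
    selected = st.sidebar.selectbox("Model", list(results.keys()))
    test_metrics = results[selected]['test_metrics']
    cols = st.columns(len(test_metrics))
    for col, (metric_name, metric_value) in zip(cols, test_metrics.items()):
        with col:
            create_metric_card(metric_name, metric_value)

    # Cross-model comparison and per-model diagnostics
    st.pyplot(plot_performance_comparison(results, metric='test_metrics'))
    st.pyplot(plot_model_performance(results))
    st.pyplot(plot_feature_importance(results[selected]['model'],
                                      feature_names, selected))


if __name__ == "__main__":
    main()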