import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler

# Load the heart disease dataset (heart.csv is expected in the working directory)
df = pd.read_csv('heart.csv')

# Separate the features from the target column
X = df.drop('target', axis=1)
y = df['target']

# Split the data into training and test sets (80/20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the features (used by the linear model)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
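
# Note: standardization matters for the linear model because its coefficient
# magnitudes are only comparable across features when those features share a
# common scale; the tree-based models below are scale-invariant and are
# therefore fit on the unscaled data.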

# Compute feature importance under each of the three models
def calculate_importance():
    # Linear Regression: absolute coefficients on the standardized features
    lr = LinearRegression()
    lr.fit(X_train_scaled, y_train)
    lr_importance = np.abs(lr.coef_)

    # CART: impurity-based importances from a single decision tree
    cart = DecisionTreeClassifier(random_state=42)
    cart.fit(X_train, y_train)
    cart_importance = cart.feature_importances_

    # Random Forest: impurity-based importances averaged over 100 trees
    rf = RandomForestClassifier(n_estimators=100, random_state=42)
    rf.fit(X_train, y_train)
    rf_importance = rf.feature_importances_

    return lr_importance, cart_importance, rf_importance
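
# Side note (not part of the original script): the three measures above are
# model-specific. As a rough, model-agnostic cross-check, scikit-learn's
# permutation_importance could be applied to any fitted estimator, for example:
#
#     from sklearn.inspection import permutation_importance
#     result = permutation_importance(rf, X_test, y_test, n_repeats=10, random_state=42)
#     # result.importances_mean holds one mean importance value per column of X_test
#
# Here `rf` stands for a fitted RandomForestClassifier such as the one built in
# calculate_importance(); this is an illustrative sketch, not code the app runs.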

# Build a DataFrame of per-feature importances for all three models
lr_importance, cart_importance, rf_importance = calculate_importance()
feature_importance = pd.DataFrame({
    'Feature': X.columns,
    'Linear Regression': lr_importance,
    'CART': cart_importance,
    'Random Forest': rf_importance
})

# Sort features by their Random Forest importance
feature_importance = feature_importance.sort_values('Random Forest', ascending=False)

# Plot the feature importances for the selected model
def plot_importance(model):
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.bar(feature_importance['Feature'], feature_importance[model])
    ax.set_title(f'{model} Feature Importance')
    ax.set_xlabel('Features')
    ax.set_ylabel('Importance')
    plt.setp(ax.get_xticklabels(), rotation=45, ha='right')
    st.pyplot(fig)

# Streamlit UI
st.title("心臟病預測模型特徵重要性分析")
st.write("選擇一個模型來查看其特徵重要性:")

# Dropdown for model selection
model = st.selectbox("Select a model", ["Linear Regression", "CART", "Random Forest"])

# Show the importance chart for the selected model
plot_importance(model)

# Show the underlying importance values
st.write(f"{model} feature importances:")
st.dataframe(feature_importance[['Feature', model]])
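
# Usage sketch (assumes this script is saved as app.py, with heart.csv next to it):
#     streamlit run app.py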