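# Gradio app exposing three models over the same census-style features:
# an ANN and a Random Forest predict whether income exceeds 50K, and
# HDBSCAN assigns the input to a cluster. Preprocessing artifacts
# (label encoders, scalers, one-hot encoder, PCA) are loaded from pickles
# produced during training.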
import gradio as gr
import joblib
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
import pickle
from tensorflow.keras.models import load_model
import hdbscan



def predict_ann(age, workclass, education, occupation, race, gender, capital_gain, capital_loss, hours_per_week, native_country):
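    """Predict the income bracket for one input row using the saved Keras ANN."""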

    columns = { "0":[0],
    "age": [age], "workclass":[workclass], "educational-num":[education], "occupation":[occupation],
    "race":[race], "gender":[gender], "capital-gain":[capital_gain], "capital-loss":[capital_loss], 
    "hours-per-week":[hours_per_week], "native-country":[native_country]}
    df = pd.DataFrame(data=columns)
    fixed_features = cleaning_features(df,race,False)
    print(fixed_features)

    ann_model = load_model('ann_model.h5')
    # The network outputs a probability (assumed single sigmoid unit);
    # threshold at 0.5 to get the income class.
    probability = ann_model.predict(fixed_features).ravel()[0]

    return "Income >50K" if probability > 0.5 else "Income <=50K"

def predict_rf(age, workclass, education,  occupation,  race, gender, capital_gain, capital_loss, hours_per_week, native_country):
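    """Predict the income bracket for one input row using the pickled Random Forest."""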

    columns = { 
    "age": [age], "workclass":[workclass], "educational-num":[education], "occupation":[occupation],
    "race":[race], "gender":[gender], "capital-gain":[capital_gain], "capital-loss":[capital_loss], 
    "hours-per-week":[hours_per_week], "native-country":[native_country]}
    df = pd.DataFrame(data=columns)
    fixed_features = cleaning_features(df,race,False)
    print(fixed_features)

    rf_model = pickle.load(open('rf_model.pkl', 'rb'))
    prediction = rf_model.predict(fixed_features)

    return "Income >50K" if prediction[0] == 1 else "Income <=50K"

def predict_hb(age, workclass, education,  occupation,  race, gender, capital_gain, capital_loss, hours_per_week, native_country):
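    """Assign the input row to a cluster by refitting HDBSCAN on the stored dataset plus the new row."""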
    
    columns = { 
    "age": [age], "workclass":[workclass], "educational-num":[education], "occupation":[occupation],
    "race":[race], "gender":[gender], "capital-gain":[capital_gain], "capital-loss":[capital_loss], 
    "hours-per-week":[hours_per_week], "native-country":[native_country]}
    df = pd.DataFrame(data=columns)
    fixed_features = cleaning_features(df,race,True)
    print(fixed_features)
    # hdb_model = pickle.load(open('hdbscan_model.pkl', 'rb'))
    # prediction = hdb_model.approximate_predict(fixed_features)
    scaler = StandardScaler()
    X = scaler.fit_transform(fixed_features)
    
    clusterer = hdbscan.HDBSCAN(
    min_cluster_size=220,
    min_samples=117,
    metric='euclidean',
    cluster_selection_method='eom',
    prediction_data=True,
    cluster_selection_epsilon=0.28479667859306007
    )

    # The new sample is the last row after the concat in cleaning_features,
    # so its cluster label is prediction[-1]; the refit model is saved for reuse.
    prediction = clusterer.fit_predict(X)
    filename = 'hdbscan_model.pkl'
    pickle.dump(clusterer, open(filename, 'wb'))

    return f"Predicted Cluster (HDBSCAN): {prediction[-1]}"


def cleaning_features(data, race, use_hdbscan):
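    """Encode, scale, and PCA-transform a raw input row to match the training
    feature layout; use_hdbscan adds the extra preprocessing for clustering."""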
    # with open('race_onehot_encoder.pkl', 'rb') as enc_file:
    #     encoder = pickle.load(enc_file)
    
    with open('label_encoder_work.pkl', 'rb') as le_file:
        le_work = pickle.load(le_file)
    with open('label_encoder_occ.pkl', 'rb') as le_file:
        le_occ = pickle.load(le_file)

    with open('scaler.pkl', 'rb') as scaler_file:
        scaler = pickle.load(scaler_file)
        
    education_num_mapping = {
        "Preschool": 1,
        "1st-4th": 2,
        "5th-6th": 3,
        "7th-8th": 4,
        "9th": 5,
        "10th": 6,
        "11th": 7,
        "12th": 8,
        "HS-grad": 9,
        "Some-college": 10,
        "Assoc-voc": 11,
        "Assoc-acdm": 12,
        "Bachelors": 13,
        "Masters": 14,
        "Doctorate": 15,
        "Prof-school": 16
    }
    race_categories = ["Amer-Indian-Eskimo", "Asian-Pac-Islander","Black", "Other","White"]
    gender_mapping = {"Male":1,"Female":0}
    country_mapping = {"United-States":1,"Other":0}
    
    numeric_cols = ['age', 'educational-num', 'hours-per-week']
    # columns_to_encode = ['race','marital-status','relationship']
    columns_to_encode = ['race']
    
    data['workclass'] = le_work.transform(data['workclass'])
    data['occupation'] = le_occ.transform(data['occupation'])
    data['gender'] = data['gender'].map(gender_mapping)
    data['native-country'] = data['native-country'].map(country_mapping)
    data['educational-num'] = data['educational-num'].map(education_num_mapping)
    
    data[numeric_cols] = scaler.transform(data[numeric_cols])

    for races in race_categories:
        if race == races:
            data[f'race_{races}'] = 1
        else:
            data[f'race_{races}'] = 0
   
    data = data.drop(columns=['race'])

    data = pca(data)
    if use_hdbscan:
        df_transformed = pd.read_csv('dataset.csv')
        X = df_transformed.drop('income', axis=1)
        data = pd.concat([X, data], ignore_index=True)
        data['capital-gain'] = np.log1p(data['capital-gain'])
        data['capital-loss'] = np.log1p(data['capital-loss'])
        scaler = joblib.load("robust_scaler.pkl")
        numerical_features = ['age', 'capital-gain', 'capital-loss', 'hours-per-week']
        data[numerical_features] = scaler.transform(data[numerical_features])
        
    return data



def pca(data):
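    """One-hot encode workclass and occupation, then project them onto the fitted PCA components."""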
    encoder_pkl = 'onehot_encoder.pkl'
    pca_model_pkl = 'pca.pkl'
    
    with open(pca_model_pkl, 'rb') as file:  
        pca_model = pickle.load(file)
    with open(encoder_pkl, 'rb') as file:  
        encoder = pickle.load(file)
    
    one_hot_encoded = encoder.transform(data[['workclass', 'occupation']])
    encoded_columns_df = pd.DataFrame(one_hot_encoded, columns=encoder.get_feature_names_out())
    pca_result_net = pca_model.transform(encoded_columns_df)
    pca_columns = [f'pca_component_{i+1}' for i in range(pca_model.n_components_)]
    pca_df = pd.DataFrame(pca_result_net, columns=pca_columns)
    data = data.drop(columns=['workclass', 'occupation'])
    data = pd.concat([data, pca_df], axis=1) 
    return data

def hdbscan_transform(df_transformed):
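    """Log-transform capital gain/loss and robust-scale the numeric features (not called in this app)."""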
    df_transformed['capital-gain'] = np.log1p(df_transformed['capital-gain'])
    df_transformed['capital-loss'] = np.log1p(df_transformed['capital-loss'])
    
    # Apply RobustScaler to all numerical features
    numerical_features = ['age', 'capital-gain', 'capital-loss', 'hours-per-week']
    scaler = RobustScaler()
    df_transformed[numerical_features] = scaler.fit_transform(df_transformed[numerical_features])
    return df_transformed

# Shared inputs. The component order must match the argument order of the
# predict_* functions above (capital gain/loss come before hours per week).
def build_inputs():
    return [
        gr.Slider(18, 90, step=1, label="Age"),
        gr.Dropdown(
            ["Private", "Self-emp-not-inc", "Self-emp-inc", "Federal-gov",
             "Local-gov", "State-gov", "Without-pay", "Never-worked"],
            label="Workclass"
        ),
        gr.Dropdown(
            ["Bachelors", "Some-college", "11th", "HS-grad", "Prof-school",
             "Assoc-acdm", "Assoc-voc", "9th", "7th-8th", "12th", "Masters",
             "1st-4th", "10th", "Doctorate", "5th-6th", "Preschool"],
            label="Education"
        ),
        gr.Dropdown(
            ["Tech-support", "Craft-repair", "Other-service", "Sales",
             "Exec-managerial", "Prof-specialty", "Handlers-cleaners",
             "Machine-op-inspct", "Adm-clerical", "Farming-fishing",
             "Transport-moving", "Priv-house-serv", "Protective-serv",
             "Armed-Forces"],
            label="Occupation"
        ),
        gr.Dropdown(
            ["White", "Black", "Asian-Pac-Islander", "Amer-Indian-Eskimo", "Other"],
            label="Race"
        ),
        gr.Dropdown(
            ["Male", "Female"],
            label="Gender"
        ),
        gr.Slider(0, 100000, step=100, label="Capital Gain"),
        gr.Slider(0, 5000, step=50, label="Capital Loss"),
        gr.Slider(1, 60, step=1, label="Hours Per Week"),
        gr.Dropdown(
            ["United-States", "Other"],
            label="Native Country"
        )
    ]

ann_inputs = build_inputs()
rf_inputs = build_inputs()
hbd_inputs = build_inputs()

# Interfaces for each model
ann_interface = gr.Interface(
    fn=predict_ann,
    inputs=ann_inputs,
    outputs="text",
    title="Artificial Neural Network",
    description="Predict income using an Artificial Neural Network."
)

rf_interface = gr.Interface(
    fn=predict_rf,
    inputs=rf_inputs,
    outputs="text",
    title="Random Forest",
    description="Predict income using a Random Forest model."
)

hb_interface = gr.Interface(
    fn=predict_hb,
    inputs=hbd_inputs,
    outputs="text",
    title="HDBSCAN Clustering",
    description="Assign the input to a cluster using HDBSCAN."
)

interface = gr.TabbedInterface(
    [ann_interface, rf_interface, hb_interface],
    ["ANN Model", "Random Forest Model", "HDBScan Model"]
)

interface.launch()