erjonb committed on
Commit e7327b5 · 1 Parent(s): 8bc618a

Delete P2 - Secom Notebook - Mercury.ipynb

Files changed (1)
  1. P2 - Secom Notebook - Mercury.ipynb +0 -1422
P2 - Secom Notebook - Mercury.ipynb DELETED
@@ -1,1422 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "attachments": {},
5
- "cell_type": "markdown",
6
- "metadata": {
7
- "slideshow": {
8
- "slide_type": "skip"
9
- }
10
- },
11
- "source": [
12
- "# **Classifying products in Semiconductor Industry**"
13
- ]
14
- },
15
- {
16
- "attachments": {},
17
- "cell_type": "markdown",
18
- "metadata": {
19
- "slideshow": {
20
- "slide_type": "skip"
21
- }
22
- },
23
- "source": [
24
- "#### **Import the data**"
25
- ]
26
- },
27
- {
28
- "cell_type": "code",
29
- "execution_count": 1,
30
- "metadata": {
31
- "slideshow": {
32
- "slide_type": "skip"
33
- }
34
- },
35
- "outputs": [],
36
- "source": [
37
- "# import pandas for data manipulation\n",
38
- "# import numpy for numerical computation\n",
39
- "# import seaborn for data visualization\n",
40
- "# import matplotlib for data visualization\n",
41
- "# import stats for statistical analysis\n",
42
- "# import train_test_split for splitting data into training and testing sets\n",
43
- "\n",
44
- "\n",
45
- "import pandas as pd\n",
46
- "import numpy as np\n",
47
- "import seaborn as sns\n",
48
- "import matplotlib.pyplot as plt\n",
49
- "from scipy import stats\n",
50
- "from sklearn.model_selection import train_test_split\n",
51
- "import mercury as mr"
52
- ]
53
- },
54
- {
55
- "cell_type": "code",
56
- "execution_count": 2,
57
- "metadata": {
58
- "slideshow": {
59
- "slide_type": "skip"
60
- }
61
- },
62
- "outputs": [
63
- {
64
- "data": {
65
- "application/mercury+json": {
66
- "allow_download": true,
67
- "code_uid": "App.0.40.24.1-randc1b961c9",
68
- "continuous_update": false,
69
- "description": "Recumpute everything dynamically",
70
- "full_screen": true,
71
- "model_id": "mercury-app",
72
- "notify": "{}",
73
- "output": "app",
74
- "schedule": "",
75
- "show_code": false,
76
- "show_prompt": false,
77
- "show_sidebar": true,
78
- "static_notebook": false,
79
- "title": "Secom Web App Demo",
80
- "widget": "App"
81
- },
82
- "text/html": [
83
- "<h3>Mercury Application</h3><small>This output won't appear in the web app.</small>"
84
- ],
85
- "text/plain": [
86
- "mercury.App"
87
- ]
88
- },
89
- "metadata": {},
90
- "output_type": "display_data"
91
- }
92
- ],
93
- "source": [
94
- "app = mr.App(title=\"Secom Web App Demo\", description=\"Recumpute everything dynamically\", continuous_update=False)"
95
- ]
96
- },
97
- {
98
- "cell_type": "code",
99
- "execution_count": 3,
100
- "metadata": {
101
- "slideshow": {
102
- "slide_type": "skip"
103
- }
104
- },
105
- "outputs": [],
106
- "source": [
107
- "# Read the features data from the the url of csv into pandas dataframes and rename the columns to F1, F2, F3, etc.\n",
108
- "# Read the labels data from the url of csv into pandas dataframes and rename the columns to pass/fail and date/time\n",
109
- "\n",
110
- "#url_data = 'https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data'\n",
111
- "#url_labels = 'https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data'\n",
112
- "\n",
113
- "url_data = '..\\Dataset\\secom_data.csv'\n",
114
- "url_labels = '..\\Dataset\\secom_labels.csv'\n",
115
- "\n",
116
- "features = pd.read_csv(url_data, delimiter=' ', header=None)\n",
117
- "labels = pd.read_csv(url_labels, delimiter=' ', names=['pass/fail', 'date_time'])\n",
118
- "\n",
119
- "prefix = 'F'\n",
120
- "new_column_names = [prefix + str(i) for i in range(1, len(features.columns)+1)]\n",
121
- "features.columns = new_column_names\n",
122
- "\n",
123
- "labels['pass/fail'] = labels['pass/fail'].replace({-1: 0, 1: 1})\n"
124
- ]
125
- },
126
- {
127
- "attachments": {},
128
- "cell_type": "markdown",
129
- "metadata": {
130
- "slideshow": {
131
- "slide_type": "skip"
132
- }
133
- },
134
- "source": [
135
- "#### **Split the data**"
136
- ]
137
- },
138
- {
139
- "cell_type": "code",
140
- "execution_count": 4,
141
- "metadata": {
142
- "slideshow": {
143
- "slide_type": "skip"
144
- }
145
- },
146
- "outputs": [
147
- {
148
- "name": "stdout",
149
- "output_type": "stream",
150
- "text": [
151
- "Dropped date/time column from labels dataframe\n"
152
- ]
153
- }
154
- ],
155
- "source": [
156
- "# if there is a date/time column, drop it from the features and labels dataframes, else continue\n",
157
- "\n",
158
- "if 'date_time' in labels.columns:\n",
159
- " labels = labels.drop(['date_time'], axis=1)\n",
160
- " print('Dropped date/time column from labels dataframe')\n",
161
- "\n",
162
- "\n",
163
- "# Split the dataset and the labels into training and testing sets\n",
164
- "# use stratify to ensure that the training and testing sets have the same percentage of pass and fail labels\n",
165
- "# use random_state to ensure that the same random split is generated each time the code is run\n",
166
- "\n",
167
- "\n",
168
- "X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, stratify=labels, random_state=13)"
169
- ]
170
- },
171
- {
172
- "attachments": {},
173
- "cell_type": "markdown",
174
- "metadata": {
175
- "slideshow": {
176
- "slide_type": "skip"
177
- }
178
- },
179
- "source": [
180
- "### **Functions**"
181
- ]
182
- },
183
- {
184
- "attachments": {},
185
- "cell_type": "markdown",
186
- "metadata": {
187
- "slideshow": {
188
- "slide_type": "skip"
189
- }
190
- },
191
- "source": [
192
- "#### **Feature Removal**"
193
- ]
194
- },
195
- {
196
- "cell_type": "code",
197
- "execution_count": 5,
198
- "metadata": {
199
- "slideshow": {
200
- "slide_type": "skip"
201
- }
202
- },
203
- "outputs": [],
204
- "source": [
205
- "def columns_to_drop(df,drop_duplicates='yes', missing_values_threshold=100, variance_threshold=0, \n",
206
- " correlation_threshold=1.1):\n",
207
- " \n",
208
- " print('Shape of the dataframe is: ', df.shape)\n",
209
- "\n",
210
- " # Drop duplicated columns\n",
211
- " if drop_duplicates == 'yes':\n",
212
- " new_column_names = df.columns\n",
213
- " df = df.T.drop_duplicates().T\n",
214
- " print('the number of columns to be dropped due to duplications is: ', len(new_column_names) - len(df.columns))\n",
215
- " drop_duplicated = list(set(new_column_names) - set(df.columns))\n",
216
- "\n",
217
- " elif drop_duplicates == 'no':\n",
218
- " df = df.T.T\n",
219
- " print('No columns were dropped due to duplications') \n",
220
- "\n",
221
- " # Print the percentage of columns in df with missing values more than or equal to threshold\n",
222
- " print('the number of columns to be dropped due to missing values is: ', len(df.isnull().mean()[df.isnull().mean() > missing_values_threshold/100].index))\n",
223
- " \n",
224
- " # Print into a list the columns to be dropped due to missing values\n",
225
- " drop_missing = list(df.isnull().mean()[df.isnull().mean() > missing_values_threshold/100].index)\n",
226
- "\n",
227
- " # Drop columns with more than or equal to threshold missing values from df\n",
228
- " df.drop(drop_missing, axis=1, inplace=True)\n",
229
- " \n",
230
- " # Print the number of columns in df with variance less than threshold\n",
231
- " print('the number of columns to be dropped due to low variance is: ', len(df.var()[df.var() <= variance_threshold].index))\n",
232
- "\n",
233
- " # Print into a list the columns to be dropped due to low variance\n",
234
- " drop_variance = list(df.var()[df.var() <= variance_threshold].index)\n",
235
- "\n",
236
- " # Drop columns with more than or equal to threshold variance from df\n",
237
- " df.drop(drop_variance, axis=1, inplace=True)\n",
238
- "\n",
239
- " # Print the number of columns in df with more than or equal to threshold correlation\n",
240
- " \n",
241
- " # Create correlation matrix and round it to 4 decimal places\n",
242
- " corr_matrix = df.corr().abs().round(4)\n",
243
- " upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))\n",
244
- " to_drop = [column for column in upper.columns if any(upper[column] >= correlation_threshold)]\n",
245
- " print('the number of columns to be dropped due to high correlation is: ', len(to_drop))\n",
246
- "\n",
247
- " # Print into a list the columns to be dropped due to high correlation\n",
248
- " drop_correlation = [column for column in upper.columns if any(upper[column] >= correlation_threshold)]\n",
249
- "\n",
250
- " # Drop columns with more than or equal to threshold correlation from df\n",
251
- " df.drop(to_drop, axis=1, inplace=True)\n",
252
- " \n",
253
- " if drop_duplicates == 'yes':\n",
254
- " dropped = (drop_duplicated+drop_missing+drop_variance+drop_correlation)\n",
255
- "\n",
256
- " elif drop_duplicates =='no':\n",
257
- " dropped = (drop_missing+drop_variance+drop_correlation)\n",
258
- " \n",
259
- " print('Total number of columns to be dropped is: ', len(dropped))\n",
260
- " print('New shape of the dataframe is: ', df.shape)\n",
261
- "\n",
262
- " global drop_duplicates_var\n",
263
- " drop_duplicates_var = drop_duplicates\n",
264
- " \n",
265
- " global missing_values_threshold_var\n",
266
- " missing_values_threshold_var = missing_values_threshold\n",
267
- "\n",
268
- " global variance_threshold_var\n",
269
- " variance_threshold_var = variance_threshold\n",
270
- "\n",
271
- " global correlation_threshold_var\n",
272
- " correlation_threshold_var = correlation_threshold\n",
273
- " \n",
274
- " print(type(dropped))\n",
275
- " return dropped"
276
- ]
277
- },
278
- {
279
- "attachments": {},
280
- "cell_type": "markdown",
281
- "metadata": {
282
- "slideshow": {
283
- "slide_type": "skip"
284
- }
285
- },
286
- "source": [
287
- "#### **Outlier Removal**"
288
- ]
289
- },
290
- {
291
- "cell_type": "code",
292
- "execution_count": 6,
293
- "metadata": {
294
- "slideshow": {
295
- "slide_type": "skip"
296
- }
297
- },
298
- "outputs": [],
299
- "source": [
300
- "def outlier_removal(z_df, z_threshold=4):\n",
301
- " \n",
302
- " global outlier_var\n",
303
- "\n",
304
- " if z_threshold == 'none':\n",
305
- " print('No outliers were removed')\n",
306
- " outlier_var = 'none'\n",
307
- " return z_df\n",
308
- " \n",
309
- " else:\n",
310
- " print('The z-score threshold is:', z_threshold)\n",
311
- "\n",
312
- " z_df_copy = z_df.copy()\n",
313
- "\n",
314
- " z_scores = np.abs(stats.zscore(z_df_copy))\n",
315
- "\n",
316
- " # Identify the outliers in the dataset using the z-score method\n",
317
- " outliers_mask = z_scores > z_threshold\n",
318
- " z_df_copy[outliers_mask] = np.nan\n",
319
- "\n",
320
- " outliers_count = np.count_nonzero(outliers_mask)\n",
321
- " print('The number of outliers in the whole dataset is / was:', outliers_count)\n",
322
- "\n",
323
- " outlier_var = z_threshold\n",
324
- "\n",
325
- " print(type(z_df_copy))\n",
326
- " return z_df_copy"
327
- ]
328
- },
329
- {
330
- "attachments": {},
331
- "cell_type": "markdown",
332
- "metadata": {
333
- "slideshow": {
334
- "slide_type": "skip"
335
- }
336
- },
337
- "source": [
338
- "#### **Scaling Methods**"
339
- ]
340
- },
341
- {
342
- "cell_type": "code",
343
- "execution_count": 7,
344
- "metadata": {
345
- "slideshow": {
346
- "slide_type": "skip"
347
- }
348
- },
349
- "outputs": [],
350
- "source": [
351
- "# define a function to scale the dataframe using different scaling models\n",
352
- "\n",
353
- "def scale_dataframe(scale_model,df_fit, df_transform):\n",
354
- " \n",
355
- " global scale_model_var\n",
356
- "\n",
357
- " if scale_model == 'robust':\n",
358
- " from sklearn.preprocessing import RobustScaler\n",
359
- " scaler = RobustScaler()\n",
360
- " scaler.fit(df_fit)\n",
361
- " df_scaled = scaler.transform(df_transform)\n",
362
- " df_scaled = pd.DataFrame(df_scaled, columns=df_transform.columns)\n",
363
- " print('The dataframe has been scaled using the robust scaling model')\n",
364
- " scale_model_var = 'robust'\n",
365
- " return df_scaled\n",
366
- " \n",
367
- " elif scale_model == 'standard':\n",
368
- " from sklearn.preprocessing import StandardScaler\n",
369
- " scaler = StandardScaler()\n",
370
- " scaler.fit(df_fit)\n",
371
- " df_scaled = scaler.transform(df_transform)\n",
372
- " df_scaled = pd.DataFrame(df_scaled, columns=df_transform.columns)\n",
373
- " print('The dataframe has been scaled using the standard scaling model')\n",
374
- " scale_model_var = 'standard'\n",
375
- " return df_scaled\n",
376
- " \n",
377
- " elif scale_model == 'normal':\n",
378
- " from sklearn.preprocessing import Normalizer\n",
379
- " scaler = Normalizer()\n",
380
- " scaler.fit(df_fit)\n",
381
- " df_scaled = scaler.transform(df_transform)\n",
382
- " df_scaled = pd.DataFrame(df_scaled, columns=df_transform.columns)\n",
383
- " print('The dataframe has been scaled using the normal scaling model')\n",
384
- " scale_model_var = 'normal'\n",
385
- " return df_scaled\n",
386
- " \n",
387
- " elif scale_model == 'minmax':\n",
388
- " from sklearn.preprocessing import MinMaxScaler\n",
389
- " scaler = MinMaxScaler()\n",
390
- " scaler.fit(df_fit)\n",
391
- " df_scaled = scaler.transform(df_transform)\n",
392
- " df_scaled = pd.DataFrame(df_scaled, columns=df_transform.columns)\n",
393
- " print('The dataframe has been scaled using the minmax scaling model')\n",
394
- " scale_model_var = 'minmax'\n",
395
- " return df_scaled\n",
396
- " \n",
397
- " elif scale_model == 'none':\n",
398
- " print('The dataframe has not been scaled')\n",
399
- " scale_model_var = 'none'\n",
400
- " return df_transform\n",
401
- " \n",
402
- " else:\n",
403
- " print('Please choose a valid scaling model: robust, standard, normal, or minmax')\n",
404
- " return None"
405
- ]
406
- },
407
- {
408
- "attachments": {},
409
- "cell_type": "markdown",
410
- "metadata": {
411
- "slideshow": {
412
- "slide_type": "skip"
413
- }
414
- },
415
- "source": [
416
- "#### **Missing Value Imputation**"
417
- ]
418
- },
419
- {
420
- "cell_type": "code",
421
- "execution_count": 8,
422
- "metadata": {
423
- "slideshow": {
424
- "slide_type": "skip"
425
- }
426
- },
427
- "outputs": [],
428
- "source": [
429
- "# define a function to impute missing values using different imputation models\n",
430
- "\n",
431
- "def impute_missing_values(imputation, df_fit, df_transform, n_neighbors=5):\n",
432
- "\n",
433
- " print('Number of missing values before imputation: ', df_transform.isnull().sum().sum())\n",
434
- "\n",
435
- " global imputation_var\n",
436
- "\n",
437
- " if imputation == 'knn':\n",
438
- "\n",
439
- " from sklearn.impute import KNNImputer\n",
440
- " imputer = KNNImputer(n_neighbors=n_neighbors)\n",
441
- " imputer.fit(df_fit)\n",
442
- " df_imputed = imputer.transform(df_transform)\n",
443
- " df_imputed = pd.DataFrame(df_imputed, columns=df_transform.columns)\n",
444
- " print('Number of missing values after imputation: ', df_imputed.isnull().sum().sum())\n",
445
- " imputation_var = 'knn'\n",
446
- " return df_imputed\n",
447
- " \n",
448
- " elif imputation == 'mean':\n",
449
- "\n",
450
- " from sklearn.impute import SimpleImputer\n",
451
- " imputer = SimpleImputer(strategy='mean')\n",
452
- " imputer.fit(df_fit)\n",
453
- " df_imputed = imputer.transform(df_transform)\n",
454
- " df_imputed = pd.DataFrame(df_imputed, columns=df_transform.columns)\n",
455
- " print('Number of missing values after imputation: ', df_imputed.isnull().sum().sum())\n",
456
- " imputation_var = 'mean'\n",
457
- " return df_imputed\n",
458
- " \n",
459
- " elif imputation == 'median':\n",
460
- "\n",
461
- " from sklearn.impute import SimpleImputer\n",
462
- " imputer = SimpleImputer(strategy='median')\n",
463
- " imputer.fit(df_fit)\n",
464
- " df_imputed = imputer.transform(df_transform)\n",
465
- " df_imputed = pd.DataFrame(df_imputed, columns=df_transform.columns)\n",
466
- " print('Number of missing values after imputation: ', df_imputed.isnull().sum().sum())\n",
467
- " imputation_var = 'median'\n",
468
- " return df_imputed\n",
469
- " \n",
470
- " elif imputation == 'most_frequent':\n",
471
- " \n",
472
- " from sklearn.impute import SimpleImputer\n",
473
- " imputer = SimpleImputer(strategy='most_frequent')\n",
474
- " imputer.fit(df_fit)\n",
475
- " df_imputed = imputer.transform(df_transform)\n",
476
- " df_imputed = pd.DataFrame(df_imputed, columns=df_transform.columns)\n",
477
- " print('Number of missing values after imputation: ', df_imputed.isnull().sum().sum())\n",
478
- " imputation_var = 'most_frequent'\n",
479
- " return df_imputed\n",
480
- " \n",
481
- " else:\n",
482
- " print('Please choose an imputation model from the following: knn, mean, median, most_frequent')\n",
483
- " df_imputed = df_transform.copy()\n",
484
- " return df_imputed\n"
485
- ]
486
- },
487
- {
488
- "attachments": {},
489
- "cell_type": "markdown",
490
- "metadata": {
491
- "slideshow": {
492
- "slide_type": "skip"
493
- }
494
- },
495
- "source": [
496
- "#### **Imbalance Treatment**"
497
- ]
498
- },
499
- {
500
- "cell_type": "code",
501
- "execution_count": 9,
502
- "metadata": {
503
- "slideshow": {
504
- "slide_type": "skip"
505
- }
506
- },
507
- "outputs": [],
508
- "source": [
509
- "#define a function to oversample and understamble the imbalance in the training set\n",
510
- "\n",
511
- "def imbalance_treatment(method, X_train, y_train):\n",
512
- "\n",
513
- " global imbalance_var\n",
514
- "\n",
515
- " if method == 'smote': \n",
516
- " from imblearn.over_sampling import SMOTE\n",
517
- " sm = SMOTE(random_state=42)\n",
518
- " X_train_res, y_train_res = sm.fit_resample(X_train, y_train)\n",
519
- " print('Shape of the training set after oversampling with SMOTE: ', X_train_res.shape)\n",
520
- " print('Value counts of the target variable after oversampling with SMOTE: \\n', y_train_res.value_counts())\n",
521
- " imbalance_var = 'smote'\n",
522
- " return X_train_res, y_train_res\n",
523
- " \n",
524
- " if method == 'undersampling':\n",
525
- " from imblearn.under_sampling import RandomUnderSampler\n",
526
- " rus = RandomUnderSampler(random_state=42)\n",
527
- " X_train_res, y_train_res = rus.fit_resample(X_train, y_train)\n",
528
- " print('Shape of the training set after undersampling with RandomUnderSampler: ', X_train_res.shape)\n",
529
- " print('Value counts of the target variable after undersampling with RandomUnderSampler: \\n', y_train_res.value_counts())\n",
530
- " imbalance_var = 'random_undersampling'\n",
531
- " return X_train_res, y_train_res\n",
532
- " \n",
533
- " if method == 'rose':\n",
534
- " from imblearn.over_sampling import RandomOverSampler\n",
535
- " ros = RandomOverSampler(random_state=42)\n",
536
- " X_train_res, y_train_res = ros.fit_resample(X_train, y_train)\n",
537
- " print('Shape of the training set after oversampling with RandomOverSampler: ', X_train_res.shape)\n",
538
- " print('Value counts of the target variable after oversampling with RandomOverSampler: \\n', y_train_res.value_counts())\n",
539
- " imbalance_var = 'rose'\n",
540
- " return X_train_res, y_train_res\n",
541
- " \n",
542
- " \n",
543
- " if method == 'none':\n",
544
- " X_train_res = X_train\n",
545
- " y_train_res = y_train\n",
546
- " print('Shape of the training set after no resampling: ', X_train_res.shape)\n",
547
- " print('Value counts of the target variable after no resampling: \\n', y_train_res.value_counts())\n",
548
- " imbalance_var = 'none'\n",
549
- " return X_train_res, y_train_res\n",
550
- " \n",
551
- " else:\n",
552
- " print('Please choose a valid resampling method: smote, rose, undersampling or none')\n",
553
- " X_train_res = X_train\n",
554
- " y_train_res = y_train\n",
555
- " return X_train_res, y_train_res"
556
- ]
557
- },
558
- {
559
- "attachments": {},
560
- "cell_type": "markdown",
561
- "metadata": {
562
- "slideshow": {
563
- "slide_type": "skip"
564
- }
565
- },
566
- "source": [
567
- "#### **Training Models**"
568
- ]
569
- },
570
- {
571
- "cell_type": "code",
572
- "execution_count": 10,
573
- "metadata": {
574
- "slideshow": {
575
- "slide_type": "skip"
576
- }
577
- },
578
- "outputs": [],
579
- "source": [
580
- "# define a function where you can choose the model you want to use to train the data\n",
581
- "\n",
582
- "def train_model(model, X_train, y_train, X_test, y_test):\n",
583
- "\n",
584
- " global model_var\n",
585
- "\n",
586
- " if model == 'random_forest':\n",
587
- " from sklearn.ensemble import RandomForestClassifier\n",
588
- " rfc = RandomForestClassifier(n_estimators=100, random_state=13)\n",
589
- " rfc.fit(X_train, y_train)\n",
590
- " y_pred = rfc.predict(X_test)\n",
591
- " model_var = 'random_forest'\n",
592
- " return y_pred\n",
593
- "\n",
594
- " if model == 'logistic_regression':\n",
595
- " from sklearn.linear_model import LogisticRegression\n",
596
- " lr = LogisticRegression()\n",
597
- " lr.fit(X_train, y_train)\n",
598
- " y_pred = lr.predict(X_test)\n",
599
- " model_var = 'logistic_regression'\n",
600
- " return y_pred\n",
601
- " \n",
602
- " if model == 'knn':\n",
603
- " from sklearn.neighbors import KNeighborsClassifier\n",
604
- " knn = KNeighborsClassifier(n_neighbors=5)\n",
605
- " knn.fit(X_train, y_train)\n",
606
- " y_pred = knn.predict(X_test)\n",
607
- " model_var = 'knn'\n",
608
- " return y_pred\n",
609
- " \n",
610
- " if model == 'svm':\n",
611
- " from sklearn.svm import SVC\n",
612
- " svm = SVC()\n",
613
- " svm.fit(X_train, y_train)\n",
614
- " y_pred = svm.predict(X_test)\n",
615
- " model_var = 'svm'\n",
616
- " return y_pred\n",
617
- " \n",
618
- " if model == 'naive_bayes':\n",
619
- " from sklearn.naive_bayes import GaussianNB\n",
620
- " nb = GaussianNB()\n",
621
- " nb.fit(X_train, y_train)\n",
622
- " y_pred = nb.predict(X_test)\n",
623
- " model_var = 'naive_bayes'\n",
624
- " return y_pred\n",
625
- " \n",
626
- " if model == 'decision_tree':\n",
627
- " from sklearn.tree import DecisionTreeClassifier\n",
628
- " dt = DecisionTreeClassifier()\n",
629
- " dt.fit(X_train, y_train)\n",
630
- " y_pred = dt.predict(X_test)\n",
631
- " model_var = 'decision_tree'\n",
632
- " return y_pred\n",
633
- " \n",
634
- " if model == 'xgboost':\n",
635
- " from xgboost import XGBClassifier\n",
636
- " xgb = XGBClassifier()\n",
637
- " xgb.fit(X_train, y_train)\n",
638
- " y_pred = xgb.predict(X_test)\n",
639
- " model_var = 'xgboost'\n",
640
- " return y_pred\n",
641
- " \n",
642
- " else:\n",
643
- " print('Please choose a model from the following: random_forest, logistic_regression, knn, svm, naive_bayes, decision_tree, xgboost')\n",
644
- " return None"
645
- ]
646
- },
647
- {
648
- "attachments": {},
649
- "cell_type": "markdown",
650
- "metadata": {
651
- "slideshow": {
652
- "slide_type": "skip"
653
- }
654
- },
655
- "source": [
656
- "#### **Evaluation Function**"
657
- ]
658
- },
659
- {
660
- "cell_type": "code",
661
- "execution_count": 11,
662
- "metadata": {
663
- "slideshow": {
664
- "slide_type": "skip"
665
- }
666
- },
667
- "outputs": [],
668
- "source": [
669
- "#define a function that prints the strings below\n",
670
- "\n",
671
- "from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\n",
672
- "\n",
673
- "def evaluate_models(model='all'):\n",
674
- " print('Have the duplicates been removed?', drop_duplicates_var)\n",
675
- " print('What is the missing values threshold %?', missing_values_threshold_var)\n",
676
- " print('What is the variance threshold?', variance_threshold_var)\n",
677
- " print('What is the correlation threshold?', correlation_threshold_var)\n",
678
- " print('What is the outlier removal threshold?', outlier_var)\n",
679
- " print('What is the scaling method?', scale_model_var)\n",
680
- " print('What is the imputation method?', imputation_var) \n",
681
- " print('What is the imbalance treatment?', imbalance_var)\n",
682
- "\n",
683
- " all_models = ['random_forest', 'logistic_regression', 'knn', 'svm', 'naive_bayes', 'decision_tree', 'xgboost']\n",
684
- " evaluation_score_append = []\n",
685
- " evaluation_count_append = []\n",
686
- " \n",
687
- " for selected_model in all_models:\n",
688
- " \n",
689
- " if model == 'all' or model == selected_model:\n",
690
- "\n",
691
- " evaluation_score = []\n",
692
- " evaluation_count = []\n",
693
- "\n",
694
- " y_pred = globals()['y_pred_' + selected_model] # Get the prediction variable dynamically\n",
695
- "\n",
696
- " def namestr(obj, namespace):\n",
697
- " return [name for name in namespace if namespace[name] is obj]\n",
698
- "\n",
699
- " model_name = namestr(y_pred, globals())[0]\n",
700
- " model_name = model_name.replace('y_pred_', '') \n",
701
- "\n",
702
- " cm = confusion_matrix(y_test, y_pred)\n",
703
- "\n",
704
- " # create a dataframe with the results for each model\n",
705
- "\n",
706
- " evaluation_score.append(model_name)\n",
707
- " evaluation_score.append(round(accuracy_score(y_test, y_pred), 2))\n",
708
- " evaluation_score.append(round(precision_score(y_test, y_pred, zero_division=0), 2))\n",
709
- " evaluation_score.append(round(recall_score(y_test, y_pred), 2))\n",
710
- " evaluation_score.append(round(f1_score(y_test, y_pred), 2))\n",
711
- " evaluation_score_append.append(evaluation_score)\n",
712
- "\n",
713
- "\n",
714
- " # create a dataframe with the true positives, true negatives, false positives and false negatives for each model\n",
715
- "\n",
716
- " evaluation_count.append(model_name)\n",
717
- " evaluation_count.append(cm[0][0])\n",
718
- " evaluation_count.append(cm[0][1])\n",
719
- " evaluation_count.append(cm[1][0])\n",
720
- " evaluation_count.append(cm[1][1])\n",
721
- " evaluation_count_append.append(evaluation_count)\n",
722
- "\n",
723
- " \n",
724
- " evaluation_score_append = pd.DataFrame(evaluation_score_append, \n",
725
- " columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1-score'])\n",
726
- " \n",
727
- " \n",
728
- "\n",
729
- " evaluation_count_append = pd.DataFrame(evaluation_count_append,\n",
730
- " columns=['Model', 'True Negatives', 'False Positives', 'False Negatives', 'True Positives'])\n",
731
- " \n",
732
- " \n",
733
- " return evaluation_score_append, evaluation_count_append"
734
- ]
735
- },
736
- {
737
- "attachments": {},
738
- "cell_type": "markdown",
739
- "metadata": {
740
- "slideshow": {
741
- "slide_type": "skip"
742
- }
743
- },
744
- "source": [
745
- "### **Input Variables**"
746
- ]
747
- },
748
- {
749
- "cell_type": "code",
750
- "execution_count": 12,
751
- "metadata": {
752
- "slideshow": {
753
- "slide_type": "skip"
754
- }
755
- },
756
- "outputs": [
757
- {
758
- "data": {
759
- "application/mercury+json": {
760
- "choices": [
761
- "yes",
762
- "no"
763
- ],
764
- "code_uid": "Select.0.40.16.25-rand87a54245",
765
- "disabled": false,
766
- "hidden": false,
767
- "label": "Drop Duplicates",
768
- "model_id": "5464c30e15a543b5901a81c32250831b",
769
- "url_key": "",
770
- "value": "yes",
771
- "widget": "Select"
772
- },
773
- "application/vnd.jupyter.widget-view+json": {
774
- "model_id": "5464c30e15a543b5901a81c32250831b",
775
- "version_major": 2,
776
- "version_minor": 0
777
- },
778
- "text/plain": [
779
- "mercury.Select"
780
- ]
781
- },
782
- "metadata": {},
783
- "output_type": "display_data"
784
- },
785
- {
786
- "data": {
787
- "application/mercury+json": {
788
- "code_uid": "Text.0.40.15.28-rand6e89b88d",
789
- "disabled": false,
790
- "hidden": false,
791
- "label": "Missing Value Threeshold",
792
- "model_id": "1740f68e25d04e1cb3b7768df15d5873",
793
- "rows": 1,
794
- "url_key": "",
795
- "value": "80",
796
- "widget": "Text"
797
- },
798
- "application/vnd.jupyter.widget-view+json": {
799
- "model_id": "1740f68e25d04e1cb3b7768df15d5873",
800
- "version_major": 2,
801
- "version_minor": 0
802
- },
803
- "text/plain": [
804
- "mercury.Text"
805
- ]
806
- },
807
- "metadata": {},
808
- "output_type": "display_data"
809
- },
810
- {
811
- "data": {
812
- "application/mercury+json": {
813
- "code_uid": "Text.0.40.15.31-rand74006b42",
814
- "disabled": false,
815
- "hidden": false,
816
- "label": "Variance Threshold",
817
- "model_id": "559d04e880944fd29ca3478a7a4b20ff",
818
- "rows": 1,
819
- "url_key": "",
820
- "value": "0",
821
- "widget": "Text"
822
- },
823
- "application/vnd.jupyter.widget-view+json": {
824
- "model_id": "559d04e880944fd29ca3478a7a4b20ff",
825
- "version_major": 2,
826
- "version_minor": 0
827
- },
828
- "text/plain": [
829
- "mercury.Text"
830
- ]
831
- },
832
- "metadata": {},
833
- "output_type": "display_data"
834
- },
835
- {
836
- "data": {
837
- "application/mercury+json": {
838
- "code_uid": "Text.0.40.15.34-rand05bb51ec",
839
- "disabled": false,
840
- "hidden": false,
841
- "label": "Correlation Threshold",
842
- "model_id": "896e1c9e96b04caeb3df77ad7c0c5ff2",
843
- "rows": 1,
844
- "url_key": "",
845
- "value": "1",
846
- "widget": "Text"
847
- },
848
- "application/vnd.jupyter.widget-view+json": {
849
- "model_id": "896e1c9e96b04caeb3df77ad7c0c5ff2",
850
- "version_major": 2,
851
- "version_minor": 0
852
- },
853
- "text/plain": [
854
- "mercury.Text"
855
- ]
856
- },
857
- "metadata": {},
858
- "output_type": "display_data"
859
- },
860
- {
861
- "data": {
862
- "application/mercury+json": {
863
- "choices": [
864
- "none",
865
- 3,
866
- 4,
867
- 5
868
- ],
869
- "code_uid": "Select.0.40.16.38-randc39f4f69",
870
- "disabled": false,
871
- "hidden": false,
872
- "label": "Outlier Removal Threshold",
873
- "model_id": "6d9dea0253ca4bfbb452c5ee16bfb821",
874
- "url_key": "",
875
- "value": "none",
876
- "widget": "Select"
877
- },
878
- "application/vnd.jupyter.widget-view+json": {
879
- "model_id": "6d9dea0253ca4bfbb452c5ee16bfb821",
880
- "version_major": 2,
881
- "version_minor": 0
882
- },
883
- "text/plain": [
884
- "mercury.Select"
885
- ]
886
- },
887
- "metadata": {},
888
- "output_type": "display_data"
889
- },
890
- {
891
- "data": {
892
- "application/mercury+json": {
893
- "choices": [
894
- "none",
895
- "normal",
896
- "standard",
897
- "minmax",
898
- "robust"
899
- ],
900
- "code_uid": "Select.0.40.16.46-rand80d22513",
901
- "disabled": false,
902
- "hidden": false,
903
- "label": "Scaling Variables",
904
- "model_id": "7fbd91c876aa4b2591239e1cc5e8c0d1",
905
- "url_key": "",
906
- "value": "none",
907
- "widget": "Select"
908
- },
909
- "application/vnd.jupyter.widget-view+json": {
910
- "model_id": "7fbd91c876aa4b2591239e1cc5e8c0d1",
911
- "version_major": 2,
912
- "version_minor": 0
913
- },
914
- "text/plain": [
915
- "mercury.Select"
916
- ]
917
- },
918
- "metadata": {},
919
- "output_type": "display_data"
920
- },
921
- {
922
- "data": {
923
- "application/mercury+json": {
924
- "choices": [
925
- "mean",
926
- "median",
927
- "knn",
928
- "most_frequent"
929
- ],
930
- "code_uid": "Select.0.40.16.50-rand00afecfa",
931
- "disabled": false,
932
- "hidden": false,
933
- "label": "Imputation Methods",
934
- "model_id": "15298e99f1ad40469be71f5787356f53",
935
- "url_key": "",
936
- "value": "mean",
937
- "widget": "Select"
938
- },
939
- "application/vnd.jupyter.widget-view+json": {
940
- "model_id": "15298e99f1ad40469be71f5787356f53",
941
- "version_major": 2,
942
- "version_minor": 0
943
- },
944
- "text/plain": [
945
- "mercury.Select"
946
- ]
947
- },
948
- "metadata": {},
949
- "output_type": "display_data"
950
- },
951
- {
952
- "data": {
953
- "application/mercury+json": {
954
- "choices": [
955
- "none",
956
- "smote",
957
- "undersampling",
958
- "rose"
959
- ],
960
- "code_uid": "Select.0.40.16.55-rand9393c38d",
961
- "disabled": false,
962
- "hidden": false,
963
- "label": "Imbalance Treatment",
964
- "model_id": "9c2d3e6384a1481ea6dc6f7060404ef8",
965
- "url_key": "",
966
- "value": "none",
967
- "widget": "Select"
968
- },
969
- "application/vnd.jupyter.widget-view+json": {
970
- "model_id": "9c2d3e6384a1481ea6dc6f7060404ef8",
971
- "version_major": 2,
972
- "version_minor": 0
973
- },
974
- "text/plain": [
975
- "mercury.Select"
976
- ]
977
- },
978
- "metadata": {},
979
- "output_type": "display_data"
980
- },
981
- {
982
- "data": {
983
- "application/mercury+json": {
984
- "choices": [
985
- "random_forest",
986
- "logistic_regression",
987
- "knn",
988
- "svm",
989
- "naive_bayes",
990
- "decision_tree",
991
- "xgboost"
992
- ],
993
- "code_uid": "Select.0.40.16.60-rand44169a53",
994
- "disabled": false,
995
- "hidden": false,
996
- "label": "Model Selection",
997
- "model_id": "081168c57bb84be68f9a62734a7d5520",
998
- "url_key": "",
999
- "value": "random_forest",
1000
- "widget": "Select"
1001
- },
1002
- "application/vnd.jupyter.widget-view+json": {
1003
- "model_id": "081168c57bb84be68f9a62734a7d5520",
1004
- "version_major": 2,
1005
- "version_minor": 0
1006
- },
1007
- "text/plain": [
1008
- "mercury.Select"
1009
- ]
1010
- },
1011
- "metadata": {},
1012
- "output_type": "display_data"
1013
- }
1014
- ],
1015
- "source": [
1016
- "\n",
1017
- "evaluation_score_df = pd.DataFrame(columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1-score', 'model_variables'])\n",
1018
- "evaluation_count_df = pd.DataFrame(columns=['Model', 'True Negatives', 'False Positives', 'False Negatives', 'True Positives', 'model_variables'])\n",
1019
- "\n",
1020
- "#############################################################################################################\n",
1021
- "# reset the dataframe containing all results, evaluation_score_df and evaluation_count_df\n",
1022
- "\n",
1023
- "reset_results = 'no' # 'yes' or 'no'\n",
1024
- "\n",
1025
- "#############################################################################################################\n",
1026
- "\n",
1027
- "if reset_results == 'yes':\n",
1028
- " evaluation_score_df = pd.DataFrame(columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1-score', 'model_variables'])\n",
1029
- " evaluation_count_df = pd.DataFrame(columns=['Model', 'True Negatives', 'False Positives', 'False Negatives', 'True Positives', 'model_variables'])\n",
1030
- " \n",
1031
- "\n",
1032
- "#############################################################################################################\n",
1033
- "\n",
1034
- "# input train and test sets\n",
1035
- "input_train_set = X_train\n",
1036
- "input_test_set = X_test\n",
1037
- "\n",
1038
- "\n",
1039
- "\n",
1040
- "# input feature removal variables\n",
1041
- "input_drop_duplicates = mr.Select(label=\"Drop Duplicates\", value=\"yes\", choices=[\"yes\", \"no\"]) # 'yes' or 'no'\n",
1042
- "input_drop_duplicates = str(input_drop_duplicates.value)\n",
1043
- "\n",
1044
- "input_missing_values_threshold = mr.Text(label=\"Missing Value Threeshold\", value='80') # 0-100 (removes columns with more missing values than the threshold)\n",
1045
- "input_missing_values_threshold = int(input_missing_values_threshold.value)\n",
1046
- "\n",
1047
- "input_variance_threshold = mr.Text(label=\"Variance Threshold\", value='0') # \n",
1048
- "input_variance_threshold = float(input_variance_threshold.value)\n",
1049
- "\n",
1050
- "input_correlation_threshold = mr.Text(label=\"Correlation Threshold\", value='1') # \n",
1051
- "input_correlation_threshold = float(input_correlation_threshold.value)\n",
1052
- "\n",
1053
- "# input outlier removal variables\n",
1054
- "input_outlier_removal_threshold = mr.Select(label=\"Outlier Removal Threshold\", value=\"none\", choices=['none', 3, 4, 5]) # 'none' or zscore from 0 to 100\n",
1055
- "\n",
1056
- "if input_outlier_removal_threshold.value != 'none':\n",
1057
- " input_outlier_removal_threshold = int(input_outlier_removal_threshold.value)\n",
1058
- "elif input_outlier_removal_threshold.value == 'none':\n",
1059
- " input_outlier_removal_threshold = str(input_outlier_removal_threshold.value)\n",
1060
- "\n",
1061
- "# input scaling variables\n",
1062
- "input_scale_model = mr.Select(label=\"Scaling Variables\", value=\"none\", choices=['none', 'normal', 'standard', 'minmax', 'robust']) # 'none', 'normal', 'standard', 'minmax', 'robust'\n",
1063
- "input_scale_model = str(input_scale_model.value)\n",
1064
- "\n",
1065
- "# input imputation variables\n",
1066
- "input_imputation_method = mr.Select(label=\"Imputation Methods\", value=\"mean\", choices=['mean', 'median', 'knn', 'most_frequent']) # 'mean', 'median', 'knn', 'most_frequent'\n",
1067
- "input_n_neighbors = 5 # only for knn imputation\n",
1068
- "input_imputation_method = str(input_imputation_method.value)\n",
1069
- "\n",
1070
- "# input imbalance treatment variables\n",
1071
- "input_imbalance_treatment = mr.Select(label=\"Imbalance Treatment\", value=\"none\", choices=['none', 'smote', 'undersampling', 'rose']) # 'none', 'smote', 'undersampling', 'rose'\n",
1072
- "input_imbalance_treatment = str(input_imbalance_treatment.value)\n",
1073
- "\n",
1074
- "\n",
1075
- "# input model\n",
1076
- "input_model = mr.Select(label=\"Model Selection\", value=\"random_forest\", choices=['random_forest', 'logistic_regression', 'knn', 'svm', 'naive_bayes','decision_tree','xgboost']) # 'all', 'random_forest', 'logistic_regression', 'knn', \n",
1077
- " # 'svm', 'naive_bayes', # 'decision_tree', 'xgboost'\n",
1078
- "input_model = str(input_model.value)\n"
1079
- ]
1080
- },
1081
- {
1082
- "attachments": {},
1083
- "cell_type": "markdown",
1084
- "metadata": {
1085
- "slideshow": {
1086
- "slide_type": "skip"
1087
- }
1088
- },
1089
- "source": [
1090
- "### **Transform Data**"
1091
- ]
1092
- },
1093
- {
1094
- "attachments": {},
1095
- "cell_type": "markdown",
1096
- "metadata": {
1097
- "slideshow": {
1098
- "slide_type": "skip"
1099
- }
1100
- },
1101
- "source": [
1102
- "#### **Remove Features**"
1103
- ]
1104
- },
1105
- {
1106
- "cell_type": "code",
1107
- "execution_count": 13,
1108
- "metadata": {
1109
- "slideshow": {
1110
- "slide_type": "skip"
1111
- }
1112
- },
1113
- "outputs": [
1114
- {
1115
- "name": "stdout",
1116
- "output_type": "stream",
1117
- "text": [
1118
- "Shape of the dataframe is: (1175, 590)\n",
1119
- "the number of columns to be dropped due to duplications is: 104\n",
1120
- "the number of columns to be dropped due to missing values is: 8\n",
1121
- "the number of columns to be dropped due to low variance is: 12\n",
1122
- "the number of columns to be dropped due to high correlation is: 21\n",
1123
- "Total number of columns to be dropped is: 145\n",
1124
- "New shape of the dataframe is: (1175, 445)\n",
1125
- "<class 'list'>\n",
1126
- "No outliers were removed\n",
1127
- "The dataframe has not been scaled\n",
1128
- "The dataframe has not been scaled\n",
1129
- "Number of missing values before imputation: 19977\n",
1130
- "Number of missing values after imputation: 0\n",
1131
- "Number of missing values before imputation: 6954\n",
1132
- "Number of missing values after imputation: 0\n",
1133
- "Shape of the training set after no resampling: (1175, 445)\n",
1134
- "Value counts of the target variable after no resampling: \n",
1135
- " pass/fail\n",
1136
- "0 1097\n",
1137
- "1 78\n",
1138
- "dtype: int64\n"
1139
- ]
1140
- }
1141
- ],
1142
- "source": [
1143
- "# remove features using the function list_columns_to_drop\n",
1144
- "\n",
1145
- "dropped = columns_to_drop(input_train_set, \n",
1146
- " input_drop_duplicates, input_missing_values_threshold, \n",
1147
- " input_variance_threshold, input_correlation_threshold)\n",
1148
- "\n",
1149
- "# drop the columns from the training and testing sets and save the new sets as new variables\n",
1150
- "\n",
1151
- "X_train2 = input_train_set.drop(dropped, axis=1)\n",
1152
- "X_test2 = input_test_set.drop(dropped, axis=1)\n",
1153
- "\n",
1154
- "X_train_dropped_outliers = outlier_removal(X_train2, input_outlier_removal_threshold)\n",
1155
- "\n",
1156
- "\n",
1157
- "X_train_scaled = scale_dataframe(input_scale_model, X_train_dropped_outliers, X_train_dropped_outliers)\n",
1158
- "X_test_scaled = scale_dataframe(input_scale_model, X_train_dropped_outliers, X_test2)\n",
1159
- "\n",
1160
- "# impute the missing values in the training and testing sets using the function impute_missing_values\n",
1161
- "\n",
1162
- "X_train_imputed = impute_missing_values(input_imputation_method,X_train_scaled, X_train_scaled, input_n_neighbors)\n",
1163
- "X_test_imputed = impute_missing_values(input_imputation_method,X_train_scaled, X_test_scaled, input_n_neighbors)\n",
1164
- "\n",
1165
- "# treat imbalance in the training set using the function oversample\n",
1166
- "\n",
1167
- "X_train_res, y_train_res = imbalance_treatment(input_imbalance_treatment, X_train_imputed, y_train)\n",
1168
- "\n"
1169
- ]
1170
- },
1171
- {
1172
- "attachments": {},
1173
- "cell_type": "markdown",
1174
- "metadata": {
1175
- "slideshow": {
1176
- "slide_type": "skip"
1177
- }
1178
- },
1179
- "source": [
1180
- "### **Model Training**"
1181
- ]
1182
- },
1183
- {
1184
- "cell_type": "code",
1185
- "execution_count": 14,
1186
- "metadata": {
1187
- "slideshow": {
1188
- "slide_type": "skip"
1189
- }
1190
- },
1191
- "outputs": [],
1192
- "source": [
1193
- "# disable warnings\n",
1194
- "\n",
1195
- "import warnings\n",
1196
- "warnings.filterwarnings('ignore')\n",
1197
- "\n",
1198
- "# train the model using the function train_model and save the predictions as new variables\n",
1199
- "\n",
1200
- "y_pred_random_forest = train_model('random_forest', X_train_res, y_train_res, X_test_imputed, y_test)\n",
1201
- "y_pred_logistic_regression = train_model('logistic_regression', X_train_res, y_train_res, X_test_imputed, y_test)\n",
1202
- "y_pred_knn = train_model('knn', X_train_res, y_train_res, X_test_imputed, y_test)\n",
1203
- "y_pred_svm = train_model('svm', X_train_res, y_train_res, X_test_imputed, y_test)\n",
1204
- "y_pred_naive_bayes = train_model('naive_bayes', X_train_res, y_train_res, X_test_imputed, y_test)\n",
1205
- "y_pred_decision_tree = train_model('decision_tree', X_train_res, y_train_res, X_test_imputed, y_test)\n",
1206
- "y_pred_xgboost = train_model('xgboost', X_train_res, y_train_res, X_test_imputed, y_test)"
1207
- ]
1208
- },
1209
- {
1210
- "attachments": {},
1211
- "cell_type": "markdown",
1212
- "metadata": {
1213
- "slideshow": {
1214
- "slide_type": "skip"
1215
- }
1216
- },
1217
- "source": [
1218
- "#### **Evaluate and Save**"
1219
- ]
1220
- },
1221
- {
1222
- "cell_type": "code",
1223
- "execution_count": 15,
1224
- "metadata": {
1225
- "slideshow": {
1226
- "slide_type": "slide"
1227
- }
1228
- },
1229
- "outputs": [
1230
- {
1231
- "name": "stdout",
1232
- "output_type": "stream",
1233
- "text": [
1234
- "Have the duplicates been removed? yes\n",
1235
- "What is the missing values threshold %? 80\n",
1236
- "What is the variance threshold? 0.0\n",
1237
- "What is the correlation threshold? 1.0\n",
1238
- "What is the outlier removal threshold? none\n",
1239
- "What is the scaling method? none\n",
1240
- "What is the imputation method? mean\n",
1241
- "What is the imbalance treatment? none\n"
1242
- ]
1243
- },
1244
- {
1245
- "data": {
1246
- "text/html": [
1247
- "<div>\n",
1248
- "<style scoped>\n",
1249
- " .dataframe tbody tr th:only-of-type {\n",
1250
- " vertical-align: middle;\n",
1251
- " }\n",
1252
- "\n",
1253
- " .dataframe tbody tr th {\n",
1254
- " vertical-align: top;\n",
1255
- " }\n",
1256
- "\n",
1257
- " .dataframe thead th {\n",
1258
- " text-align: right;\n",
1259
- " }\n",
1260
- "</style>\n",
1261
- "<table border=\"1\" class=\"dataframe\">\n",
1262
- " <thead>\n",
1263
- " <tr style=\"text-align: right;\">\n",
1264
- " <th></th>\n",
1265
- " <th>Model</th>\n",
1266
- " <th>Accuracy</th>\n",
1267
- " <th>Precision</th>\n",
1268
- " <th>Recall</th>\n",
1269
- " <th>F1-score</th>\n",
1270
- " </tr>\n",
1271
- " </thead>\n",
1272
- " <tbody>\n",
1273
- " <tr>\n",
1274
- " <th>0</th>\n",
1275
- " <td>random_forest</td>\n",
1276
- " <td>0.93</td>\n",
1277
- " <td>0.0</td>\n",
1278
- " <td>0.0</td>\n",
1279
- " <td>0.0</td>\n",
1280
- " </tr>\n",
1281
- " </tbody>\n",
1282
- "</table>\n",
1283
- "</div>"
1284
- ],
1285
- "text/plain": [
1286
- " Model Accuracy Precision Recall F1-score\n",
1287
- "0 random_forest 0.93 0.0 0.0 0.0"
1288
- ]
1289
- },
1290
- "metadata": {},
1291
- "output_type": "display_data"
1292
- },
1293
- {
1294
- "data": {
1295
- "text/html": [
1296
- "<div>\n",
1297
- "<style scoped>\n",
1298
- " .dataframe tbody tr th:only-of-type {\n",
1299
- " vertical-align: middle;\n",
1300
- " }\n",
1301
- "\n",
1302
- " .dataframe tbody tr th {\n",
1303
- " vertical-align: top;\n",
1304
- " }\n",
1305
- "\n",
1306
- " .dataframe thead th {\n",
1307
- " text-align: right;\n",
1308
- " }\n",
1309
- "</style>\n",
1310
- "<table border=\"1\" class=\"dataframe\">\n",
1311
- " <thead>\n",
1312
- " <tr style=\"text-align: right;\">\n",
1313
- " <th></th>\n",
1314
- " <th>Model</th>\n",
1315
- " <th>True Negatives</th>\n",
1316
- " <th>False Positives</th>\n",
1317
- " <th>False Negatives</th>\n",
1318
- " <th>True Positives</th>\n",
1319
- " </tr>\n",
1320
- " </thead>\n",
1321
- " <tbody>\n",
1322
- " <tr>\n",
1323
- " <th>0</th>\n",
1324
- " <td>random_forest</td>\n",
1325
- " <td>366</td>\n",
1326
- " <td>0</td>\n",
1327
- " <td>26</td>\n",
1328
- " <td>0</td>\n",
1329
- " </tr>\n",
1330
- " </tbody>\n",
1331
- "</table>\n",
1332
- "</div>"
1333
- ],
1334
- "text/plain": [
1335
- " Model True Negatives False Positives False Negatives \\\n",
1336
- "0 random_forest 366 0 26 \n",
1337
- "\n",
1338
- " True Positives \n",
1339
- "0 0 "
1340
- ]
1341
- },
1342
- "metadata": {},
1343
- "output_type": "display_data"
1344
- },
1345
- {
1346
- "data": {
1347
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUcAAAFMCAYAAABYjn6oAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAr6ElEQVR4nO3deVxU5f4H8M8IDKDAiBAgCogbIpsIV8R9BZdUyszMfffmvucvl1tm7mFqmOWClLfUzDLzZu6KLCWCK6KmLMIgyirG5nB+f5ijI0djZDkz+Hm/XrzunWfOefjOvPDTM+d55jkyQRAEEBGRhlpSF0BEpIsYjkREIhiOREQiGI5ERCIYjkREIhiOREQiGI5ERCIMpS6gIkpLS5GWlgZzc3PIZDKpyyEiPSAIAu7fvw97e3vUqvX88aFeh2NaWhocHBykLoOI9FBKSgoaNmz43Of1OhzNzc0BAPKWIyEzkEtcDema5BNrpC6BdND9vDw0dXZQ58fz6HU4Pv4oLTOQMxypDAsLC6lLIB32T5fiOCFDRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguGow8YP6oDfdy3AndOrcef0apzYMRsB7VtqHOPibIs96yYi/dRqZISvwckds+FgZ6lxjJ+nM/63eSruRayF8tQqHPpqOkyMjarzpZCENm8KQYtmzqhrZoJ2bXwQHn5a6pL0gqHUBdDzpd7JwaINP+HP5HsAgGH9/LAneALavrMC8TfT4dzQGke3zcKOHyPw8aZfkJtfgBbOdigsKlH34efpjJ82voc123/DrJV7UPxQBc/mDVBaKkj1sqga7dm9C3Nnz8BnG0Lg3649tny1GUGv98a5C1fg6OgodXk6TSYIgt7+K8nLy4NCoYCxx3jIDORSl1MtUk+sxP+t+xE7foxE2IrRKClRYeyisOcef3LHbByNvoqPQn6pxip1Q/YfG6UuQXId2/nB27s11n++Sd3WysMV/foHYemy5RJWJp28vDzYWimQm5sLCwuL5x7Hj9V6olYtGQYF+qCOqRzRF25BJpOhVwc3XE/OwP7PJyPp6HKcCpuDfl081ee8ZmmGNp7OuJuVj+Ohs5B45BP8tmU62rVqLOEroepSXFyM2HMx6N4zQKO9e48AREVGSFSV/mA46ji3pva4e2YtcqPXYf0HgzF49le4ejMdNvXMYF7HBHNG98ThiCvo9++N2H/8PL5bOw4dfJoCAJwbWgMAPpjYB9t+iMCAySGIi0/Bwc1T0cTxNSlfFlWDe/fuQaVSwcbGVqPd1tYWd+6kS1SV/uA1Rx13LfEO/N5ZjrrmtRHUvRW++mg4AsZ9htz7BQCAAycuYsPO4wCAC9dS4efVGOPf6oDwmBuoVUsGANi6Nxxf748CAJxPuI0ubVwwcoA/Fm/YL82Lomolk8k0HguCUKaNyuLIUceVPFThZso9nLuSjMUb9uPitVRMHtIF97LzUVKiQvxNpcbxCTfT1bPVyrt5AID4m5qjhIRb6WVmtKnmsba2hoGBQZlRYkZGRpnRJJXFcNQzMshgLDdEyUMVYq4kobmT5h95MycbJCuzAQBJaZlIy8hB80Y2Gsc0dbJBsjKr2momacjlcni39sGxI4c12o8dPYy2/u0kqkp/8GO1DvtwSj/8duYKUtKzYV7HBIMCfdDJtxn6Tw4BAATvOIKvV45B+LkbOHn2GgLatUSfTu4IHP+Zuo/gHUewcFJfXLyWivMJtzGsnx9cGtni3blbpXpZVI2mzZiFsaOGo7WPL/za+mPrli+RkpyMcRMmSV2azmM46jAbK3Ns/XgE7KwtkJtfiEvXU9F/cgiORV8FAOw/fgFTl32HuWMCsHbeW7iWlIEhc7cgIu6muo+N/z0BE2MjrJo9EJaK2rh4LRWv/3sjbt2+J9XLomo06O3ByMrMxCfLPkK6Ugk3N3f8+PNBODk5SV2azpN8nWNISAhWr14NpVIJNzc3rFu3Dh07dizXua/iOkcqP65zJDF6sc5x165dmDFjBj744APExsaiY8eO6N27N5KTk6Usi4hI2nD89NNPMXbsWIwbNw6urq5Yt24dHBwcsGnTpn8+mYioCkkWjsXFxYiJiUFAgObq/YCAAEREiK/eLyoqQl5ensYPEVFVkCwcH6/et7Utu3o/PV189f7y5cuhUCjUPw4ODtVRKhG9giRf56jN6v0FCxYgNzdX/ZOSklIdJRLRK0iycHy8ev/ZUWJGRkaZ0eRjxsbGsLCw0PipCeop6iDp6HI41q8naR1uTe1x49elqG3CmX9dkZmZCUd7GyQlJkpax6WLF9GkUUM8ePBA0jqqk2ThKJfL4ePjg8OHNVfvHz58GO3avVqr9+eOCcDBUxfV31pZM3cgzuych5zoYER99365+pAbGeLT+YOQcmwF7kWsxZ51E9HApq7GMXXNTbF16Qikn1qN9FOrsXXpCCjMTNXPX76RhrOXkjB1WNdKe21UMatXLkefvv3g1KgRACA5ORkDg/rBSlEHDe2sMWvGNBQXF7+wj6KiIsycPhUN7axhpaiDt97oj9u3b2sck52djTEjh8PWSgFbKwXGjByOnJwc9fPuHh7w/VcbbPgsuLJfos6S9GP1rFmzsGXLFmzbtg3x8fGYOXMmkpOTMWnSq7N638TYCCOD/BG6L1LdJpPJEPZTFL7/7Vy5+1k9dyD6d/XEiAXb0X10MMxM5di7fpJ68wkACF0+Cp4uDTFgSggGTAmBp0tDbP14hEY/YfujMGFQR43zSBoFBQXYsX0rRo0ZBwBQqVR4s39fPHjwAEdPhCNs53f4cd9ezJ87+4X9zJ01A/t/2oewnd/h6Ilw5OfnY+CA16FSqdTHjBr+Li6cj8NPB37FTwd+xYXzcRg7arhGPyNGjsaXmzdpnFeTSfoNmcGDByMzMxMfffQRlEol3N3dcfDgq7V6P7B9SzxUqRB94Za6bfaq7wEA1pZ94N6swT/2YWFmglFB/hi7MAzHoxMAAGMWhuH6/5aim18LHImMh4uzLQLbu6HT8NX441ISAGDy0v/iZNgcNHOywfWkDADA4Yh41FPUQUefZjj5x7XKfrmkhUO//g+GhoZo6+8PADhy+DfEx1/B9YMpsLe3BwCsWLUWE8aOwodLl4leZsrNzUXo9q3YGvo1unXvAQDYtuMbNHN2wLGjR9AzIBBX4+Px26FfcTI8Cm38/AAAn3/xFbp09Me1hAQ0d3EBAPQMCERWZiZOnzqJLl2
7VcdbICnJJ2Tee+89JCYmoqioCDExMejUqZPUJVWrDq2b4tyVii1693Z1hNzIEEci49Vtyru5uPxnGtp6OQN4dLuEnPt/qYMRAH6/mIic+3+hrdeTzW9LHqpw8Voq2ns3qVBNVHHhp0+htY+v+nF0VCTc3NzVwQg8CqyioiLEnosR7SP2XAxKSkrQ46kNb+3t7eHm5q7e8DY6KhIKhUIdjADg17YtFAqFxqa4crkcHp5eOPOK3ING8nB81TnZ14Pybm6F+rCzskBRcQly/t7j8bGMzPuwtXo0mrC1ssDdrPwy597NyoetteaIIy0jB072VhWqiSouKSkR9es/CcI76emweWay0tLSEnK5/LnL39LT0yGXy2FpqblFnY2tLe78fc6dO+l4zcamzLmv2diU2e7MvkEDySeHqgvDUWImxnIUFj2skr5lMhme/uK82NfoZTIAz7QXFJWgtgnvTii1woICmJiYaLSJLXN7mc1rnz3nef3imXZTE1P8VfCXVr9LXzEcJZaZkw9Li9oV6iM9Mw/GciPUNTfVaH+tnhkyMh99i+hOZh5srMzLnGttaYY7mfc12iwVtXEvu+wok6qXlZU1snOy1Y9t7ezUo73HsrOzUVJS8tzlb3Z2diguLkZ2drZG+92MDPUo1NbWDhl37pQ5997du7B9ZlPc7OwsWFu/GrfYYDhK7PzV22jR2K5CfcTGJ6O45CG6t22hbrOztoBbE3tEnX800RN94RbqmteGr9uTya5/uTuhrnltRJ2/qdGfWxN7xCVoLvWg6ufl7Y2rV66oH/u19cfly5egVD7Z/f3I4d9gbGwM79Y+on14t/aBkZERjj614a1SqcTly5fUG976tfVHbm4u/vj9d/Uxv0dHIzc3t8ymuJcvX0KrVt6V8vp0HcNRYocj49GycX2NUV9jB2t4Nm8AW2sLmBobwbN5A3g2bwAjQwMAgP1rCsT9sFAddHn5hQj9MRIrZr2JLm2aw8ulIbZ9PBKXbqSp935MuHUHh85cxueLh6CNRyO08WiEzxe9i19OXlTPVAOAY/16sLdR4Pjf55F0evYMxJUrl9Wjvh49A+Dq2hJjRw1HXGwsjh87igXz52D02PHqmerU1FR4ubdQB51CocCo0WPx/rzZOH7sKOJiYzFm5DC4u3uoZ69buLoiILAXJk8aj+ioKERHRWHypPHo0/d19Uw1ACQlJiItNRVd/z6vpuNmtxK7fCMN5+KTMTCgNbbuPQMA2LR4KDr5NlMfE71rAQDApc9iJCuzYGhoABdnO5g+9U2WeWv2QqUqxTcrx8LU2AjHf0/AhOlfo7T0yfXE0f+3A2vnvYWfQyYDAH45eREzV+zRqOft3r44EnlVfasFko67hwda+/hi757dGDdhIgwMDPDD/l8wY+p76Na5PUxNTfH2O+9ixao16nMelpTgWkICCp66LrhqbTAMDA0xbMjbKCgoQNdu3fHl1lAYGBioj9kethOzZ0xDvz6PZrX7vt4fwes198Pcvetb9OgZ8MostZN8s9uKqCmb3QZ2aInlM9+Az1ufiE6aVBe5kSEu/bQYIxeEIvKZj9r6qCZsdvvr/w5iwfw5iIm7hFq1pPugV1RUBHfXZtjx9bdo1769ZHVUhvJudsuRow44FH4FTR1s0MBGgdt3ciSrw7F+PazceqhGBGNN0at3H9y4fh2pqamS7kKVnJSE+e9/oPfBqA2OHKnGqgkjR6p8enGbBCIiXcVwJCISwXAkIhLBcCQiEsFwJCISwXAkIhLBcCQiEsFwJCISwXAkIhLBcCQiEsFwJCISwXAkIhLBcCQiEsFwJCISwXAkIhLBcCQiEsFwJCISwXAkIhLBcCQiEsFwJCISUa67D+7fv7/cHfbv3/+liyEi0hXlCsegoKBydSaTyaBSqSpSDxGRTihXOJaWllZ1HUREOqVC1xwLCwsrqw4iIp2idTiqVCosXboUDRo0gJmZGW7evAkAWLRoEbZu3VrpBRIRSUHrcFy2bBlCQ0OxatUqyOVydbuHhwe2bNlSqcUREUlF63AMCwvDl19+iaFDh8LAwEDd7unpiatXr1ZqcUREUtE6HFNTU9G0adMy7aWlpSgpKamUooiIpKZ1OLq5ueH06dNl2vfs2QNvb+9KKYqISGrlWsrztCVLlmD48OFITU1FaWkpfvjhByQkJCAsLAwHDhyoihqJiKqd1iPHfv36YdeuXTh48CBkMhkWL16M+Ph4/Pzzz+jZs2dV1EhEVO20HjkCQGBgIAIDAyu7FiIinfFS4QgAZ8+eRXx8PGQyGVxdXeHj41OZdRERSUrrcLx9+zaGDBmCM2fOoG7dugCAnJwctGvXDt9++y0cHBwqu0Yiomqn9TXHMWPGoKSkBPHx8cjKykJWVhbi4+MhCALGjh1bFTUSEVU7rUeOp0+fRkREBFxcXNRtLi4u2LBhA9q3b1+pxRERSUXrkaOjo6PoYu+HDx+iQYMGlVIUEZHUtA7HVatWYerUqTh79iwEQQDwaHJm+vTpWLNmTaUXSEQkhXJ9rLa0tIRMJlM/fvDgAfz8/GBo+Oj0hw8fwtDQEGPGjCn3xrhERLqsXOG4bt26Ki6DiEi3lCscR44cWdV1EBHplJdeBA4ABQUFZSZnLCwsKlQQEZEu0HpC5sGDB5gyZQpsbGxgZmYGS0tLjR8ioppA63CcN28ejh07hpCQEBgbG2PLli348MMPYW9vj7CwsKqokYio2mn9sfrnn39GWFgYunTpgjFjxqBjx45o2rQpnJycsHPnTgwdOrQq6iQiqlZajxyzsrLg7OwM4NH1xaysLABAhw4dcOrUqcqtjohIIlqHY+PGjZGYmAgAaNmyJXbv3g3g0Yjy8UYURET6TutwHD16NM6fPw8AWLBggfra48yZMzF37txKL5CISApaX3OcOXOm+v937doVV69exdmzZ9GkSRN4eXlVanFERFKp0DpH4NFGFI6OjpVRCxGRzihXOK5fv77cHU6bNu2liyEi0hXlCsfg4OBydSaTyRiORFQjlCscb926VdV1EBHpFK1nq4mIXgUMRyIiEQxHIiIRDEciIhEMRyIiES8VjqdPn8awYcPg7++P1NRUAMDXX3+N8PDwSi2OiEgqWofj3r17ERgYCFNTU8TGxqKoqAgAcP/+fXzyySeVXiARkRS0DsePP/4YX3zxBb766isYGRmp29u1a4dz585VanFERFLROhwTEhLQqVOnMu0WFhbIycmpjJqIiCSndTjWr18fN27cKNMeHh6Oxo0bV0pRRERS0zocJ06ciOnTpyM6OhoymQxpaWnYuXMn5syZg/fee68qaiQiqnZab1k2b9485ObmomvXrigsLESnTp1gbGyMOXPmYMqUKVVRIxFRtZMJgiC8zIl//fUXrly5gtLSUrRs2RJmZmaVXds/ysvLg0KhgLHHeMgM5NX++0m3Zf+xUeoSSAfl5eXB1kqB3NxcWFhYPPe4l97stnbt2vD19X3Z04mIdJrW4di1a1fIZLLnPn/s2LEKFUREpAu0DsdWrVppPC4pKUFcXBwuXbqEkSNHVlZdRE
SS0jocn7cr+H/+8x/k5+dXuCAiIl1QaRtPDBs2DNu2baus7oiIJFVp4RgZGQkTE5PK6o6ISFJaf6x+8803NR4LggClUomzZ89i0aJFlVYYEZGUtA5HhUKh8bhWrVpwcXHBRx99hICAgEorjIhISlqFo0qlwqhRo+Dh4YF69epVVU1ERJLT6pqjgYEBAgMDkZubW1X1EBHpBK0nZDw8PHDz5s2qqIWISGdoHY7Lli3DnDlzcODAASiVSuTl5Wn8EBHVBFpPyPTq1QsA0L9/f42vEQqCAJlMBpVKVXnVERFJROtwPH78eFXUQUSkU7QOR2dnZzg4OJTZfEIQBKSkpFRaYUREUtL6mqOzszPu3r1bpj0rKwvOzs6VUhQRkdS0DsfH1xaflZ+fz68PElGNUe6P1bNmzQIAyGQyLFq0CLVr11Y/p1KpEB0dXWY7MyIifVXucIyNjQXwaOR48eJFyOVPbksgl8vh5eWFOXPmVH6FREQSKHc4Pp6lHj16ND777LMX3nuBiEjfaT1bvX379qqog4hIp1Tafo5ERDUJw5GISATDkYhIBMORiEgEw5GISATDkYhIBMORiEgEw5GISATDkYhIBMORiEgEw5GISATDkYhIBMORiEgEw5GISATDkYhIBMORiEgEw5GISATDkYhIBMNRz8wZE4Dwb+YiI3wNko4ux+5Px6OZk02Z41ycbbFn3USkn1qNjPA1OLljNhzsLCWomKS2eVMIWjRzRl0zE7Rr44Pw8NNSl6QXGI56pmPrpvhi1yl0HrEGr/97IwwMDHBg0xTUNnlyN0jnhtY4um0Wrt1KR+D4z9Bm8HIs/+pXFBaVSFg5SWHP7l2YO3sG5r//AaL+iEW7Dh0R9HpvJCcnS12azpMJgiBIXcTLysvLg0KhgLHHeMgM5P98Qg1kbWmGlGMr0GNsMM6c+xMAELZiNEpKVBi7KEzi6qSV/cdGqUuQXMd2fvD2bo31n29St7XycEW//kFYumy5hJVJJy8vD7ZWCuTm5r7wLqocOeo5CzMTAEB27l8AAJlMhl4d3HA9OQP7P5+MpKPLcSpsDvp18ZSyTJJAcXExYs/FoHvPAI327j0CEBUZIVFV+oPhqOdWzh6IM+du4MqfSgCATT0zmNcxwZzRPXE44gr6/Xsj9h8/j+/WjkMHn6YSV0vV6d69e1CpVLCxsdVot7W1xZ076RJVpT+0vm816Y7g99+GRzN7dB8drG6rVevRf+8OnLiIDTuPAwAuXEuFn1djjH+rA8JjbkhSK0lHJpNpPBYEoUwblcWRo576dP4gvN7ZA4Hj1yM1I0fdfi87HyUlKsTfVGocn3AznbPVrxhra2sYGBiUGSVmZGSUGU1SWQxHPRQ8fxAGdPNCr4nrkZSWqfFcyUMVYq4kobmT5h9/MycbJCuzq7NMkphcLod3ax8cO3JYo/3Y0cNo699Ooqr0Bz9W65l1C97G4N6+GDTzS+Q/KIStlTkAIDe/UL1UJ3jHEXy9cgzCz93AybPXENCuJfp0ckfg+M+kLJ0kMG3GLIwdNRytfXzh19YfW7d8iZTkZIybMEnq0nQel/LomYJY8eUp4xd/jW9+jlY/HjGgLeaOCUADm7q4lpSBj7/4BQdOXKyuMnUCl/I8snlTCD5duwrpSiXc3Nyxam0wOnTsJHVZkinvUh5Jw/HUqVNYvXo1YmJioFQqsW/fPgQFBZX7/FcxHKn8GI4kRi/WOT548ABeXl7YuJF/xESkWyS95ti7d2/07t273McXFRWhqKhI/TgvL68qyiIi0q/Z6uXLl0OhUKh/HBwcpC6JiGoovQrHBQsWIDc3V/2TkpIidUlEVEPp1VIeY2NjGBsbS10GEb0C9GrkWFPVU9RB0tHlcKxfT9I63Jra48avSzW2PyNpZWZmwtHeBkmJiZLWceniRTRp1BAPHjyQtI7qxHDUAXPHBODgqYtIVmYBANbMHYgzO+chJzoYUd+9X64+5EaG+HT+IKQcW4F7EWuxZ91ENLCpq3FMXXNTbF06AumnViP91GpsXToCCjNT9fOXb6Th7KUkTB3WtdJeG1XM6pXL0advPzg1agQASE5OxsCgfrBS1EFDO2vMmjENxcXFL+yjqKgIM6dPRUM7a1gp6uCtN/rj9u3bGsdkZ2djzMjhsLVSwNZKgTEjhyMnJ0f9vLuHB3z/1QYbPgvGq0LScMzPz0dcXBzi4uIAALdu3UJcXNwrtRGnibERRgb5I3RfpLpNJpMh7KcofP/buXL3s3ruQPTv6okRC7aj++hgmJnKsXf9JNSq9WSDgdDlo+Dp0hADpoRgwJQQeLo0xNaPR2j0E7Y/ChMGddQ4j6RRUFCAHdu3YtSYcQAAlUqFN/v3xYMHD3D0RDjCdn6HH/ftxfy5s1/Yz9xZM7D/p30I2/kdjp4IR35+PgYOeB0qlUp9zKjh7+LC+Tj8dOBX/HTgV1w4H4exo4Zr9DNi5Gh8uXmTxnk1maThePbsWXh7e8Pb2xsAMGvWLHh7e2Px4sVSllWtAtu3xEOVCtEXbqnbZq/6Hpt3n8Kt25kvOPMJCzMTjAryx/uf7sPx6AScT7iNMQvD4N7UHt38WgB4dNuEwPZueO+jnYi+cAvRF25h8tL/om9nD43bLByOiEc9RR109GlWuS+UtHbo1//B0NAQbf39AQBHDv+G+Pgr2LbjG7Ty9ka37j2wYtVabN/61XOXteXm5iJ0+1asWLUW3br3QCtvb2zb8Q0uXbqIY0ePAACuxsfjt0O/ImTzFrT190dbf398/sVXOPjLAVxLSFD31TMgEFmZmTh96mTVv3gdIGk4dunSBYIglPkJDQ2Vsqxq1aF1U5y7UrGRsrerI+RGhjgSGa9uU97NxeU/09DWyxkA4OfpjJz7f+GPS0nqY36/mIic+3+hrVdjdVvJQxUuXktFe+8mFaqJKi789Cm09vFVP46OioSbmzvs7e3VbT0DAlFUVITYczGifcSei0FJSQl6PLXhrb29Pdzc3NUb3kZHRUKhUKCNn5/6GL+2baFQKDQ2xZXL5fDw9MKZV+QeNLzmKDEn+3pQ3s2tUB92VhYoKi5Bzv0CjfaMzPuwtXr09ShbKwvczcovc+7drHzYWmt+hSotIwdO9lYVqokqLikpEfXrPwnCO+npsLHV3G3J0tIScrkc6enim9emp6dDLpfD0lJzuzobW1vc+fucO3fS8ZpN2Zu0vWZjU2a7M/sGDSSfHKouDEeJmRjLUVj0sEr6lslkePqL82Jfo5fJADzTXlBUgtomRlVSE5VfYUEBTExMNNrENql9mc1rnz3nef3imXZTE1P8VfCXVr9LXzEcJZaZkw9Li9oV6iM9Mw/GciPUNTfVaH+tnhkyMh9di7qTmQebv7c3e5q1pRnuZN7XaLNU1Ma97LKjTKpeVlbWyM55sgenrZ2derT3WHZ2NkpKSmBrK755rZ2dHYqLi5GdrbmX592MDPUo1NbWDhl37pQ5997du7B9ZlPc7OwsWFu/9lKvR98wHCV2/upttGhsV6E+YuOTUVzyEN3btlC32VlbwK2JPaLOP5roib5wC
3XNa8PXzUl9zL/cnVDXvDaizt/U6M+tiT3iEjSXelD18/L2xtUrV9SP/dr64/LlS1Aqn+zyfuTwbzA2NoZ3ax/RPrxb+8DIyAhHn9rwVqlU4vLlS+oNb/3a+iM3Nxd//P67+pjfo6ORm5tbZlPcy5cvoVUr70p5fbqO4Sixw5HxaNm4vsaor7GDNTybN4CttQVMjY3g2bwBPJs3gJGhAQDA/jUF4n5YqA66vPxChP4YiRWz3kSXNs3h5dIQ2z4eiUs30nAs+ioAIOHWHRw6cxmfLx6CNh6N0MajET5f9C5+OXkR15My1L/bsX492NsocPzv80g6PXsG4sqVy+pRX4+eAXB1bYmxo4YjLjYWx48dxYL5czB67Hj11lupqanwcm+hDjqFQoFRo8fi/XmzcfzYUcTFxmLMyGFwd/dAt+49AAAtXF0RENgLkyeNR3RUFKKjojB50nj06fs6mru4qOtJSkxEWmoquv59Xk2nV18frIku30jDufhkDAxoja17zwAANi0eik6+T5bSRO9aAABw6bMYycosGBoawMXZDqZPfZNl3pq9UKlK8c3KsTA1NsLx3xMwYfrXKC19cj1x9P/twNp5b+HnkMkAgF9OXsTMFXs06nm7ty+ORF7lLRV0gLuHB1r7+GLvnt0YN2EiDAwM8MP+XzBj6nvo1rk9TE1N8fY772LFqjXqcx6WlOBaQgIKnrouuGptMAwMDTFsyNsoKChA127d8eXWUBgYGKiP2R62E7NnTEO/Po9mtfu+3h/B6zW3Ety961v06BkAJycnvAq4E7gOCOzQEstnvgGftz4RnTSpLnIjQ1z6aTFGLghF5DMftfVRTdjs9tf/HcSC+XMQE3dJfWdJKRQVFcHdtRl2fP0t2rVvL1kdlaG8m91y5KgDDoVfQVMHGzSwUeD2nRzJ6nCsXw8rtx6qEcFYU/Tq3Qc3rl9HamqqpFv0JSclYf77H+h9MGqDI0eqsWrCyJEqn17cJoGISFcxHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRDAciYhEMByJiEQwHImIRBhKXUBFCILw6H9VxRJXQrooLy9P6hJIB93/++/icX48j0z4pyN02O3bt+Hg4CB1GUSkh1JSUtCwYcPnPq/X4VhaWoq0tDSYm5tDJpNJXY7k8vLy4ODggJSUFFhYWEhdDukI/l1oEgQB9+/fh729PWrVev6VRb3+WF2rVq0XJv+rysLCgv8IqAz+XTyhUCj+8RhOyBARiWA4EhGJYDjWIMbGxliyZAmMjY2lLoV0CP8uXo5eT8gQEVUVjhyJiEQwHImIRDAciYhEMByJiEQwHGuIkJAQODs7w8TEBD4+Pjh9+rTUJZHETp06hX79+sHe3h4ymQw//vij1CXpFYZjDbBr1y7MmDEDH3zwAWJjY9GxY0f07t0bycnJUpdGEnrw4AG8vLywceNGqUvRS1zKUwP4+fmhdevW2LRpk7rN1dUVQUFBWL58uYSVka6QyWTYt28fgoKCpC5Fb3DkqOeKi4sRExODgIAAjfaAgABERERIVBWR/mM46rl79+5BpVLB1tZWo93W1hbp6ekSVUWk/xiONcSzW7YJgsBt3IgqgOGo56ytrWFgYFBmlJiRkVFmNElE5cdw1HNyuRw+Pj44fPiwRvvhw4fRrl07iaoi0n96vdktPTJr1iwMHz4cvr6+8Pf3x5dffonk5GRMmjRJ6tJIQvn5+bhx44b68a1btxAXF4d69erB0dFRwsr0A5fy1BAhISFYtWoVlEol3N3dERwcjE6dOkldFknoxIkT6Nq1a5n2kSNHIjQ0tPoL0jMMRyIiEbzmSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UiSatSoEdatW6d+LNV2/v/5z3/QqlWr5z5/4sQJyGQy5OTklLvPLl26YMaMGRWqKzQ0FHXr1q1QH/RyGI6kU5RKJXr37l2uY/8p0IgqghtPUIUVFxdDLpdXSl92dnaV0g9RRXHkSBq6dOmCKVOmYMqUKahbty6srKywcOFCPP0V/EaNGuHjjz/GqFGjoFAoMH78eABAREQEOnXqBFNTUzg4OGDatGl48OCB+ryMjAz069cPpqamcHZ2xs6dO8v8/mc/Vt++fRvvvPMO6tWrhzp16sDX1xfR0dEIDQ3Fhx9+iPPnz0Mmk0Emk6k3U8jNzcWECRNgY2MDCwsLdOvWDefPn9f4PStWrICtrS3Mzc0xduxYFBYWavU+ZWZmYsiQIWjYsCFq164NDw8PfPvtt2WOe/jw4Qvfy+LiYsybNw8NGjRAnTp14OfnhxMnTmhVC1UNhiOVsWPHDhgaGiI6Ohrr169HcHAwtmzZonHM6tWr4e7ujpiYGCxatAgXL15EYGAg3nzzTVy4cAG7du1CeHg4pkyZoj5n1KhRSExMxLFjx/D9998jJCQEGRkZz60jPz8fnTt3RlpaGvbv34/z589j3rx5KC0txeDBgzF79my4ublBqVRCqVRi8ODBEAQBffv2RXp6Og4ePIiYmBi0bt0a3bt3R1ZWFgBg9+7dWLJkCZYtW4azZ8+ifv36CAkJ0eo9KiwshI+PDw4cOIBLly5hwoQJGD58OKKjo7V6L0ePHo0zZ87gu+++w4ULFzBo0CD06tUL169f16oeqgIC0VM6d+4suLq6CqWlpeq2+fPnC66ururHTk5OQlBQkMZ5w4cPFyZMmKDRdvr0aaFWrVpCQUGBkJCQIAAQoqKi1M/Hx8cLAITg4GB1GwBh3759giAIwubNmwVzc3MhMzNTtNYlS5YIXl5eGm1Hjx4VLCwshMLCQo32Jk2aCJs3bxYEQRD8/f2FSZMmaTzv5+dXpq+nHT9+XAAgZGdnP/eYPn36CLNnz1Y//qf38saNG4JMJhNSU1M1+unevbuwYMECQRAEYfv27YJCoXju76Sqw2uOVEbbtm017j/j7++PtWvXQqVSwcDAAADg6+urcU5MTAxu3Lih8VFZEASUlpbi1q1buHbtGgwNDTXOa9GixQtnYuPi4uDt7Y169eqVu/aYmBjk5+fDyspKo72goAB//vknACA+Pr7MRsD+/v44fvx4uX+PSqXCihUrsGvXLqSmpqKoqAhFRUWoU6eOxnEvei/PnTsHQRDQvHlzjXOKiorK1E/Vj+FIL+XZECgtLcXEiRMxbdq0Msc6OjoiISEBQNkbgb2Iqamp1nWVlpaifv36otftKnNJzNq1axEcHIx169bBw8MDderUwYwZM1BcXKxVrQYGBoiJiVH/R+cx
MzOzSquVXg7DkcqIiooq87hZs2Zl/gE/rXXr1rh8+TKaNm0q+ryrqysePnyIs2fPok2bNgCAhISEF64b9PT0xJYtW5CVlSU6epTL5VCpVGXqSE9Ph6GhIRo1avTcWqKiojBixAiN16iN06dPY8CAARg2bBiAR0F3/fp1uLq6ahz3ovfS29sbKpUKGRkZ6Nixo1a/n6oeJ2SojJSUFMyaNQsJCQn49ttvsWHDBkyfPv2F58yfPx+RkZGYPHky4uLicP36dezfvx9Tp04FALi4uKBXr14YP348oqOjERMTg3Hjxr1wdDhkyBDY2dkhKCgIZ86cwc2bN7F3715ERkYCeDRr/vi+KPfu3UNRURF69OgBf39/BAUF4dChQ0hMTERERAQWLlyIs2fPAgCmT5+Obdu2Ydu2bbh27RqWLFmCy5cva/UeNW3aFIcPH0ZERATi4+MxceJE0fuEv+i9bN68OYYOHYoRI0bghx9+wK1bt/DHH39g5cqVOHjwoFb1UBWQ+qIn6ZbOnTsL7733njBp0iTBwsJCsLS0FN5//32NSQUnJyeNSZTHfv/9d6Fnz56CmZmZUKdOHcHT01NYtmyZ+nmlUin07dtXMDY2FhwdHYWwsLAyfeGpCRlBEITExERh4MCBgoWFhVC7dm3B19dXiI6OFgRBEAoLC4WBAwcKdevWFQAI27dvFwRBEPLy8oSpU6cK9vb2gpGRkeDg4CAMHTpUSE5OVve7bNkywdraWjAzMxNGjhwpzJs3T6sJmczMTGHAgAGCmZmZYGNjIyxcuFAYMWKEMGDAAK3ey+LiYmHx4sVCo0aNBCMjI8HOzk544403hAsXLgiCwAkZKfEeMqShS5cuaNWqlcZX+oheRfxYTUQkguFIRCSCH6uJiERw5EhEJILhSEQkguFIRCSC4UhEJILhSEQkguFIRCSC4UhEJILhSEQk4v8B7YgKY252X9IAAAAASUVORK5CYII=",
1348
- "text/plain": [
1349
- "<Figure size 350x350 with 1 Axes>"
1350
- ]
1351
- },
1352
- "metadata": {},
1353
- "output_type": "display_data"
1354
- }
1355
- ],
1356
- "source": [
1357
- "evaluation_score_output, evaluation_counts_output = evaluate_models(input_model)\n",
1358
- "\n",
1359
- "# check if the model has already been evaluated and if not, append the results to the dataframe\n",
1360
- "\n",
1361
- "evaluation_score_df = pd.concat([evaluation_score_output, evaluation_score_df], ignore_index=True) \n",
1362
- "display(pd.DataFrame(evaluation_score_output))\n",
1363
- "\n",
1364
- "evaluation_count_df = pd.concat([evaluation_counts_output, evaluation_count_df], ignore_index=True) \n",
1365
- "display(pd.DataFrame(evaluation_counts_output))\n",
1366
- "\n",
1367
- "from mlxtend.plotting import plot_confusion_matrix\n",
1368
- "\n",
1369
- "# select the model index and filter the row from evaluation_count_df dataframe\n",
1370
- "model_index = 0\n",
1371
- "\n",
1372
- "selected_model = evaluation_count_df[evaluation_count_df.index == model_index]\n",
1373
- "\n",
1374
- "# create a np.array with selected_model values\n",
1375
- "\n",
1376
- "\n",
1377
- "conf_matrix = np.array([[selected_model['True Negatives'].values[0], selected_model['False Positives'].values[0]],\n",
1378
- " [selected_model['False Negatives'].values[0], selected_model['True Positives'].values[0]]])\n",
1379
- "\n",
1380
- "#change the size of the graph\n",
1381
- "\n",
1382
- "plt.rcParams['figure.figsize'] = [3.5, 3.5]\n",
1383
- "\n",
1384
- "fig, ax = plot_confusion_matrix(\n",
1385
- " conf_mat=conf_matrix,\n",
1386
- " show_absolute=True,\n",
1387
- " show_normed=True\n",
1388
- ")"
1389
- ]
1390
- },
1391
- {
1392
- "attachments": {},
1393
- "cell_type": "markdown",
1394
- "metadata": {},
1395
- "source": [
1396
- "#### **Plot Evaluation**"
1397
- ]
1398
- }
1399
- ],
1400
- "metadata": {
1401
- "kernelspec": {
1402
- "display_name": "base",
1403
- "language": "python",
1404
- "name": "python3"
1405
- },
1406
- "language_info": {
1407
- "codemirror_mode": {
1408
- "name": "ipython",
1409
- "version": 3
1410
- },
1411
- "file_extension": ".py",
1412
- "mimetype": "text/x-python",
1413
- "name": "python",
1414
- "nbconvert_exporter": "python",
1415
- "pygments_lexer": "ipython3",
1416
- "version": "3.9.16"
1417
- },
1418
- "orig_nbformat": 4
1419
- },
1420
- "nbformat": 4,
1421
- "nbformat_minor": 2
1422
- }