antitheft159 committed
Commit 8c93f11 · verified · 1 Parent(s): 045194b

Upload 1046_159.py

Files changed (1):
  1. 1046_159.py +227 -0

1046_159.py ADDED
@@ -0,0 +1,227 @@
# -*- coding: utf-8 -*-
"""1046.159

Automatically generated by Colab.

Original file is located at
https://colab.research.google.com/drive/1uxVrUlNk5jB6t_CKcD_4ZvpDnJfiKqyu
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings

# Silence library warnings to keep notebook output readable.
warnings.filterwarnings('ignore')
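
# The CSV is read from the Colab runtime's /content directory; the column names
# used below (Balance, Gender, Student, ...) suggest the classic Credit dataset,
# but that is an inference from the code, not stated in the notebook.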
data = pd.read_csv('/content/Credit_Data.csv')

data.head()

# The ID column is just a row identifier with no predictive value.
data.drop('ID', axis=1, inplace=True)

data.shape
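
# get_summary() collects dtype, missingness, cardinality, and basic statistics
# for every column into a single DataFrame, one row per column.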
def get_summary(df):
    df_desc = pd.DataFrame(df.describe(include='all').transpose())
    df_summary = pd.DataFrame({
        'dtype': df.dtypes,
        '#missing': df.isnull().sum().values,
        '#duplicates': df.duplicated().sum(),
        '#unique': df.nunique().values,
        'min': df_desc['min'].values,
        'max': df_desc['max'].values,
        'avg': df_desc['mean'].values,
        'std dev': df_desc['std'].values,
    })
    return df_summary

get_summary(data).style.background_gradient()
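
# Note: .style.background_gradient() only renders in a notebook; it has no
# effect when the file is run as a plain script.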
target_col = 'Balance'
feature = data.drop(target_col, axis=1).columns

fig, ax = plt.subplots(2, 5, figsize=(20, 10))
axes = ax.flatten()

for i, col in enumerate(feature):
    sns.scatterplot(data=data, x=col, y=target_col, hue='Gender', ax=axes[i])

fig.suptitle('Interactions between Target Column and Features')
plt.tight_layout()
plt.show()
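
# The 2x5 scatter grid above assumes exactly ten feature columns; the 2x6
# histogram grid below allows for all eleven columns and deletes any axes
# left empty.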
fig, ax = plt.subplots(2, 6, figsize=(20, 10))
axes = ax.flatten()

for i, col in enumerate(data.columns):
    sns.histplot(data=data, x=col, hue='Gender', ax=axes[i])

fig.suptitle("Gender-Based Distribution of Financial and Demographic Features in the Dataset")
plt.tight_layout()

# Remove any leftover empty subplot axes.
for ax in axes:
    if not ax.has_data():
        fig.delaxes(ax)

plt.show()
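
# A full pairplot draws one panel per column pair, so it scales quadratically
# with width; it is affordable here because the dataset is small.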
sns.pairplot(data, kind='scatter', diag_kind='hist', hue='Gender', palette='colorblind')
numeric_columns = data.select_dtypes(include='number').columns

fig, ax = plt.subplots(len(numeric_columns), 2, figsize=(12, len(numeric_columns) * 2))
ax = ax.flatten()

for i, col in enumerate(numeric_columns):
    sns.boxplot(data=data, x=col, width=0.6, ax=ax[2*i])
    sns.violinplot(data=data, x=col, ax=ax[2*i + 1])

plt.tight_layout()
plt.show()
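
# Spearman (rank) correlation is used so that monotone but non-linear
# relationships are still picked up; the upper-triangle mask hides the
# mirrored half of the symmetric matrix.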
corr = data.select_dtypes(exclude='object').corr(method='spearman')
mask = np.triu(np.ones_like(corr))

sns.heatmap(corr, annot=True, mask=mask, cmap='YlGnBu', cbar=True)
plt.title('Correlation Matrix', fontdict={'color': 'blue', 'fontsize': 12})
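
# Categorical columns are one-hot encoded with pd.get_dummies; drop_first=True
# avoids the dummy-variable trap (perfect multicollinearity) in the linear
# models that follow.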
cat_columns = data.select_dtypes(include='O').columns.to_list()

dummie_df = pd.get_dummies(data=data[cat_columns], drop_first=True).astype('int8')

df = data.join(dummie_df)
df.drop(cat_columns, axis=1, inplace=True)

df.head()
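
# SMOTE below balances the Student_Yes classes by synthesizing minority-class
# rows. SMOTE is normally a classification tool; applying it before a
# regression on Balance changes the training distribution, which is worth
# keeping in mind when interpreting the regression results.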
from imblearn.over_sampling import SMOTE
from collections import Counter

X_train = df.drop('Student_Yes', axis=1)
y_train = df['Student_Yes']

# n_jobs was deprecated in later imblearn releases; drop it if SMOTE warns.
sm = SMOTE(sampling_strategy='minority', random_state=14, k_neighbors=5, n_jobs=-1)
sm_X_train, sm_Y_train = sm.fit_resample(X_train, y_train)

print('Before sampling class distribution', Counter(y_train))
print('\nAfter sampling class distribution', Counter(sm_Y_train))

sm_df = pd.concat([sm_X_train, sm_Y_train], axis=1)
sm_df.head()

get_summary(sm_df).style.background_gradient()
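
# ydata-profiling generates a self-contained HTML EDA report; only the 'auto',
# phi_k, and cramers correlations are computed here to keep the run fast.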
!pip install ydata_profiling

from ydata_profiling import ProfileReport
profile_report = ProfileReport(
    sm_df,
    sort=None,
    progress_bar=False,
    html={'style': {'full_width': True}},
    correlations={
        "auto": {"calculate": True},
        "pearson": {"calculate": False},
        "spearman": {"calculate": False},
        "kendall": {"calculate": False},
        "phi_k": {"calculate": True},
        "cramers": {"calculate": True},
    },
    explorative=True,
    title="Profiling Report",
)

profile_report.to_file('output.html')
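
# Baseline model: ordinary least squares on the SMOTE-resampled data, with an
# 80/20 train/validation split and standardized features (the scaler is fit on
# the training split only, then applied to the validation split).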
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn import metrics

X = sm_df.drop('Balance', axis=1)
y = sm_df.Balance

train_x, valid_x, train_y, valid_y = train_test_split(X, y, test_size=0.2, random_state=16518, shuffle=True)
scaler = StandardScaler()
train_x = scaler.fit_transform(train_x)
valid_x = scaler.transform(valid_x)

# fit() returns the estimator itself, so the fitted model is simply lm.
lm = LinearRegression()
lm.fit(train_x, train_y)
pred = lm.predict(valid_x)
r2 = metrics.r2_score(valid_y, pred)

print('r2_score', r2)
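
# Coefficients are reported on standardized features, so their magnitudes are
# directly comparable as effect sizes per one standard deviation of each input.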
# coef_ is 1-D for a single target, so no transpose is needed.
lm_df = pd.DataFrame(lm.coef_, index=X.columns, columns=['coef_'])

lm_df.loc['intercept_'] = lm.intercept_

lm_df.sort_values(by='coef_')

plt.barh(y=lm_df.index, width='coef_', data=lm_df)
plt.show()
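
# Model selection for polynomial regression: the training split is split again
# so that the degree can be chosen on a held-out validation set, keeping
# valid_x/valid_y untouched as a final test set.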
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn import metrics

X = sm_df.drop('Balance', axis=1)
y = sm_df.Balance

train_x, valid_x, train_y, valid_y = train_test_split(X, y, test_size=0.2, random_state=16518, shuffle=True)

X_trainv, X_valid, Y_trainv, Y_valid = train_test_split(train_x, train_y, test_size=0.2, random_state=16518, shuffle=True)

train_x.shape, valid_x.shape

X_trainv.shape, X_valid.shape
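
# For each degree, fit OLS on polynomial-expanded features and report train
# and validation MSE; the gap between the two indicates over- or underfitting.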
def create_polynomial_regression_model(degree):
    """Create a polynomial regression model for the given degree."""

    poly_features = PolynomialFeatures(degree=degree, include_bias=False)

    X_train_poly = poly_features.fit_transform(X_trainv)

    poly_model = LinearRegression()
    poly_model.fit(X_train_poly, Y_trainv)

    y_train_predicted = poly_model.predict(X_train_poly)

    # transform (not fit_transform): the expansion fitted on the training
    # data is applied unchanged to the validation data.
    y_valid_predict = poly_model.predict(poly_features.transform(X_valid))

    mse_train = metrics.mean_squared_error(Y_trainv, y_train_predicted)

    mse_valid = metrics.mean_squared_error(Y_valid, y_valid_predict)

    return (mse_train, mse_valid, degree)
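
# Degrees 1 through 7 are compared; sorting by validation error surfaces the
# degree that generalizes best rather than the one that merely fits training
# data.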
results = []
for i in range(1, 8):
    results.append(create_polynomial_regression_model(i))
# A distinct name avoids shadowing the encoded DataFrame df created earlier.
errors_df = pd.DataFrame(results, columns=['Train Error', 'Validation Error', 'Degree'])
errors_df.sort_values(by='Validation Error')
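
# Final model: refit at degree 2 (presumably the winner of the search above)
# on the full training split, evaluated once on the held-out split.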
scaler = StandardScaler()
train_x = scaler.fit_transform(train_x)
valid_x = scaler.transform(valid_x)

polynomial_features = PolynomialFeatures(degree=2, include_bias=False)
train_x_poly = polynomial_features.fit_transform(train_x)
# transform, not fit_transform, so the validation set reuses the training fit.
valid_x_poly = polynomial_features.transform(valid_x)
polymodel = LinearRegression()
polymodel.fit(train_x_poly, train_y)
pred = polymodel.predict(valid_x_poly)
r2 = metrics.r2_score(valid_y, pred)

print('r2_score:', r2)
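
# cross_val_score and KFold are imported above but never used. A minimal
# sketch of how they could cross-check the degree-2 model; the 5-fold setup
# below is an assumption, not part of the original notebook.
from sklearn.pipeline import make_pipeline

cv_model = make_pipeline(
    StandardScaler(),
    PolynomialFeatures(degree=2, include_bias=False),
    LinearRegression(),
)
cv_scores = cross_val_score(cv_model, X, y, cv=KFold(n_splits=5, shuffle=True, random_state=16518), scoring='r2')
print('5-fold CV r2: mean=%.3f, std=%.3f' % (cv_scores.mean(), cv_scores.std()))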