|
import pandas as pd |
|
import numpy as np |
|
import matplotlib.pyplot as plt |
|
import time |
|
import seaborn as sns |
|
import warnings |
|
|
|
# Silence library warnings for cleaner notebook output.
warnings.filterwarnings('ignore')

# Load the credit dataset; `head()` is a display-only notebook expression.
data = pd.read_csv('/content/Credit_Data.csv')

data.head()

# The row-identifier column carries no predictive information — drop it.
data = data.drop(columns='ID')

data.shape
|
|
|
def get_summary(df):
    """Return a one-row-per-column summary of *df*.

    Parameters
    ----------
    df : pd.DataFrame
        Frame to summarise.

    Returns
    -------
    pd.DataFrame
        Indexed by the columns of *df*, with the column dtype, missing-value
        count, whole-frame duplicate-row count (a scalar repeated on every
        row), unique-value count, and min/max/mean/std (NaN for non-numeric
        columns).
    """
    # include='all' keeps non-numeric columns in describe().  Reindexing the
    # stat columns guards against them being absent entirely (an all-object
    # frame has no min/max/mean/std), which previously raised a KeyError.
    df_desc = df.describe(include='all').transpose()
    df_desc = df_desc.reindex(columns=['min', 'max', 'mean', 'std'])
    df_summary = pd.DataFrame({
        'dtype': df.dtypes,
        '#missing': df.isnull().sum().values,
        # duplicated().sum() is a frame-level scalar; pandas broadcasts it.
        '#duplicates': df.duplicated().sum(),
        '#unique': df.nunique().values,
        'min': df_desc['min'].values,
        'max': df_desc['max'].values,
        'avg': df_desc['mean'].values,
        'std dev': df_desc['std'].values,
    })
    return df_summary
|
|
|
# Render the per-column summary with a colour gradient (notebook display only).
get_summary(data).style.background_gradient()

# Regression target and the remaining predictor columns.
target_col = 'Balance'
feature = data.columns.drop('Balance')
|
|
|
# One scatter panel per predictor against the target, coloured by gender.
fig, ax = plt.subplots(2, 5, figsize=(20, 10))
axes = ax.flatten()

for panel, col in zip(axes, data[feature].columns):
    sns.scatterplot(data=data, x=col, y='Balance', hue='Gender', ax=panel)

fig.suptitle('Interactions between Target Column and Features')
plt.tight_layout()
plt.show()
|
|
|
# Per-column distributions split by gender.  The 2x6 grid may have more
# panels than columns; spares are removed after plotting.
fig, ax = plt.subplots(2, 6, figsize=(20, 10))
axes = ax.flatten()

for panel, col in zip(axes, data.columns):
    sns.histplot(data=data, x=col, hue='Gender', ax=panel)

fig.suptitle("Gender-Based Distribution of Financial and Demographic Features in the Dataset")
plt.tight_layout()

# Delete any axes that never received a plot.
for panel in axes:
    if not panel.has_data():
        fig.delaxes(panel)

plt.show()
|
|
|
# Pairwise scatter matrix, coloured by gender.
sns.pairplot(data, kind='scatter', diag_kind='hist', hue='Gender', palette='colorblind')

numeric_columns = data.select_dtypes(include='number').columns

# For every numeric column: box plot on the left, violin plot on the right.
fig, ax = plt.subplots(len(numeric_columns), 2, figsize=(12, len(numeric_columns)*2))
ax = ax.flatten()

slot = 0
for col in numeric_columns:
    sns.boxplot(data=data, x=col, width=0.6, ax=ax[slot])
    sns.violinplot(data=data, x=col, ax=ax[slot + 1])
    slot += 2

plt.tight_layout()
plt.show()
|
|
|
# Spearman rank correlation over the non-object columns, drawn as a
# lower-triangle heat map (the redundant upper triangle is masked).
corr = data.select_dtypes(exclude='object').corr(method='spearman')
mask = np.triu(np.ones_like(corr))

sns.heatmap(corr, annot=True, mask=mask, cmap='YlGnBu', cbar=True)
plt.title('Correlation Matrix', fontdict={'color': 'blue', 'fontsize': 12})
|
|
|
from sklearn.preprocessing import OneHotEncoder  # NOTE(review): unused — encoding below uses pd.get_dummies instead

# One-hot encode every object-dtype column (first level dropped to avoid the
# dummy trap), then swap the encoded columns in for the originals.
cat_columns = data.select_dtypes(include='O').columns.to_list()

dummie_df = pd.get_dummies(data=data[cat_columns], drop_first=True).astype('int8')

df = data.join(dummie_df).drop(columns=cat_columns)

df.head()
|
|
|
from imblearn.over_sampling import SMOTE
from collections import Counter

# Balance the Student_Yes classes with SMOTE oversampling.
# NOTE(review): resampling happens before the train/validation split further
# down, so synthetic rows can leak into the validation set — worth revisiting.
X_train = df.drop('Student_Yes', axis=1)
y_train = df['Student_Yes']

sm = SMOTE(sampling_strategy='minority', random_state=14, k_neighbors=5, n_jobs=-1)
sm_X_train, sm_Y_train = sm.fit_resample(X_train, y_train)

print('Before sampling class distribution', Counter(y_train))
print('\nAfter sampling class distribution', Counter(sm_Y_train))

# Recombine the resampled features and target into a single frame.
sm_df = pd.concat([sm_X_train, sm_Y_train], axis=1)
sm_df.head()

get_summary(sm_df).style.background_gradient()
|
|
|
# Install the profiling dependency (IPython/Colab shell magic — not valid in a plain Python script).
!pip install ydata_profiling
|
|
|
from ydata_profiling import ProfileReport

# Build an explorative profiling report of the resampled frame.  Pearson,
# Spearman and Kendall correlations are switched off; "auto", phi_k and
# Cramér's V stay enabled.
report_correlations = {
    "auto": {"calculate": True},
    "pearson": {"calculate": False},
    "spearman": {"calculate": False},
    "kendall": {"calculate": False},
    "phi_k": {"calculate": True},
    "cramers": {"calculate": True},
}

profile_report = ProfileReport(
    sm_df,
    sort=None,
    progress_bar=False,
    html={'style': {'full_width': True}},
    correlations=report_correlations,
    explorative=True,
    title="Profiling Report",
)

profile_report.to_file('output.html')
|
|
|
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn import metrics

# Baseline model: ordinary least squares on standardised features.
X = sm_df.drop('Balance', axis=1)
y = sm_df.Balance

train_x, valid_x, train_y, valid_y = train_test_split(X, y, test_size=0.2, random_state=16518, shuffle=True)

# Fit the scaler on the training split only, then apply to both splits.
scaler = StandardScaler()
train_x = scaler.fit_transform(train_x)
valid_x = scaler.transform(valid_x)

lm = LinearRegression()
history = lm.fit(train_x, train_y)  # fit() returns the estimator itself
pred = lm.predict(valid_x)
r2 = metrics.r2_score(valid_y, pred)

print('r2_score', r2)

# Tabulate the fitted coefficients by feature name, with the intercept
# appended as its own row.
lm_df = pd.DataFrame(history.coef_.T, index=X.columns, columns=['coef_'])
lm_df.loc['intercept_'] = lm.intercept_

lm_df.sort_values(by='coef_')

# Horizontal bar chart of the coefficients.
plt.barh(y=lm_df.index, width='coef_', data=lm_df)
plt.show()
|
|
|
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn import metrics

# Re-split the resampled data, then carve a secondary validation set out of
# the training portion for polynomial-degree selection.
X = sm_df.drop('Balance', axis=1)
y = sm_df.Balance

train_x, valid_x, train_y, valid_y = train_test_split(X, y, test_size=0.2, random_state=16518, shuffle=True)

X_trainv, X_valid, Y_trainv, Y_valid = train_test_split(train_x, train_y, test_size=0.2, random_state=16518, shuffle=True)

# Display-only shape checks (notebook expressions).
train_x.shape, valid_x.shape

X_trainv.shape, X_valid.shape
|
|
|
def create_polynomial_regression_model(degree):
    """Fit a polynomial regression of the given degree and return its errors.

    Uses the module-level splits ``X_trainv``/``Y_trainv`` for fitting and
    ``X_valid``/``Y_valid`` for evaluation.

    Parameters
    ----------
    degree : int
        Degree of the polynomial feature expansion.

    Returns
    -------
    tuple
        ``(mse_train, mse_valid, degree)`` — mean squared errors on the
        training and validation splits, plus the degree itself.
    """
    poly_features = PolynomialFeatures(degree=degree, include_bias=False)

    # Fit the feature expansion on the training split only.
    X_train_poly = poly_features.fit_transform(X_trainv)

    poly_model = LinearRegression()
    poly_model.fit(X_train_poly, Y_trainv)

    y_train_predicted = poly_model.predict(X_train_poly)

    # Bug fix: the validation split must be transformed with the already
    # fitted expander — the original called fit_transform here, re-fitting
    # the transformer on validation data (same numbers for
    # PolynomialFeatures, but a leakage-prone idiom).
    y_valid_predict = poly_model.predict(poly_features.transform(X_valid))

    mse_train = metrics.mean_squared_error(Y_trainv, y_train_predicted)
    mse_valid = metrics.mean_squared_error(Y_valid, y_valid_predict)

    return (mse_train, mse_valid, degree)
|
|
|
# Sweep polynomial degrees 1–7 and tabulate train/validation MSE per degree.
# NOTE(review): this rebinds the module-level name `df` (previously the
# one-hot-encoded frame).  Later cells only use `sm_df`, so it is harmless
# here, but a distinct name would be safer.
degree_errors = [create_polynomial_regression_model(d) for d in range(1, 8)]
df = pd.DataFrame(degree_errors, columns=['Train Error', 'Validation Error', 'Degree'])
df.sort_values(by='Validation Error')
|
|
|
# Final model: degree-2 polynomial regression on standardised features.
# The scaler is fitted on the training split only.
scaler = StandardScaler()
train_x = scaler.fit_transform(train_x)
valid_x = scaler.transform(valid_x)

polynomial_features = PolynomialFeatures(degree=2, include_bias=False)
train_x_poly = polynomial_features.fit_transform(train_x)
# Bug fix: use transform (not fit_transform) on the validation split — the
# expansion must be fitted on training data only.  For PolynomialFeatures
# the numeric output is identical, but the original idiom invites leakage
# with stateful transformers.
valid_x_poly = polynomial_features.transform(valid_x)

polymodel = LinearRegression()
polymodel.fit(train_x_poly, train_y)
pred = polymodel.predict(valid_x_poly)
r2 = metrics.r2_score(valid_y, pred)

print('r2_score:', r2)