Upload suture_195.py
suture_195.py +228 -0
ADDED
@@ -0,0 +1,228 @@
# -*- coding: utf-8 -*-
"""suture.195

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1IXS6Im1Ap41KG6o9EdDvJUW9N47b5Hp5
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from imblearn.over_sampling import SMOTE
import warnings

warnings.filterwarnings('ignore')
plt.style.use('ggplot')

df_train = pd.read_csv('/kaggle/input/social-media-usage-and-emotional-well-being/train.csv')

df_train.info()

df_train['Age'].value_counts()

# Drop rows whose Age field holds junk values: gender labels and a stray
# Turkish sentence ("here I am completing the existing dataset to 1000 rows:")
wrong_values = ['Male', 'Female', 'Non-binary', 'iste mevcut veri kumesini 1000 satira tamamliyorum:']
df_train = df_train[~df_train['Age'].isin(wrong_values)]

df_train['Age'] = df_train['Age'].astype('Int64')

df_train['Age'].value_counts()

print("The Shape of Train Dataset is", df_train.shape)

gender_cols = df_train['Gender'].value_counts().reset_index()
gender_cols.columns = ['Gender', 'Count']
print(gender_cols)

fig, ax = plt.subplots()
ax.bar(gender_cols['Gender'], gender_cols['Count'],
       color=['pink', 'skyblue', 'grey'], width=0.5)
ax.set_title("Distinct Count Distribution of Gender")
ax.set_xlabel("Gender")
ax.set_ylabel("Count")
plt.show()

continuous_vars = ['Age', 'Daily_Usage_Time (minutes)', 'Posts_Per_Day', 'Likes_Received_Per_Day',
                   'Comments_Received_Per_Day', 'Messages_Sent_Per_Day']

for var in continuous_vars:
    plt.figure(figsize=(10, 6))
    sns.histplot(df_train[var].dropna(), kde=True, color='skyblue')
    plt.title(f'Histogram of {var}')
    plt.xlabel(var)
    plt.ylabel('Frequency')
    plt.grid(True)
    plt.show()

for var in continuous_vars:
    plt.figure(figsize=(10, 6))
    sns.boxplot(data=df_train, x='Dominant_Emotion', y=var, palette='pastel')
    plt.title(f'Box Plot of {var} by Dominant_Emotion')
    plt.xlabel('Dominant_Emotion')
    plt.ylabel(var)
    plt.grid(True)
    plt.show()

for var in continuous_vars:
    plt.figure(figsize=(10, 6))
    sns.violinplot(data=df_train, x='Dominant_Emotion', y=var, palette='pastel', inner='quartile')
    plt.title(f'Violin Plot of {var} by Dominant_Emotion')
    plt.xlabel('Dominant_Emotion')
    plt.ylabel(var)
    plt.grid(True)
    plt.show()

categorical_vars = ['Gender', 'Platform']

for var in categorical_vars:
    plt.figure(figsize=(10, 6))
    ax = sns.countplot(data=df_train, x=var, palette='pastel')
    plt.title(f'Count Plot of {var}')
    plt.xlabel(var)
    plt.ylabel('Count')
    plt.grid(True)
    for container in ax.containers:
        ax.bar_label(container, fmt='%d')
    plt.show()

plt.figure(figsize=(10, 6))
ax = sns.countplot(data=df_train, x='Dominant_Emotion', palette='pastel')
plt.title('Count Plot of Dominant Emotion')
plt.xlabel('Dominant_Emotion')
plt.ylabel('Count')
plt.grid(True)
for container in ax.containers:
    ax.bar_label(container, fmt='%d')
plt.show()

sns.pairplot(df_train[continuous_vars + ['Dominant_Emotion']], hue='Dominant_Emotion',
             palette='pastel', diag_kind='kde')
plt.show()

for var in categorical_vars:
    plt.figure(figsize=(10, 6))
    sns.countplot(data=df_train, x=var, hue='Dominant_Emotion', palette='pastel')
    plt.title(f'Count Plot of {var} by Dominant_Emotion')
    plt.xlabel(var)
    plt.ylabel('Count')
    plt.grid(True)
    plt.show()

# clustermap builds its own figure, so the size is passed to the call itself
sns.clustermap(df_train[continuous_vars].corr(), annot=True, cmap='coolwarm',
               linewidths=0.5, figsize=(10, 10))
plt.suptitle('Clustered Correlation Matrix Heatmap')
plt.show()

df = pd.get_dummies(df_train, columns=['Gender', 'Platform'], drop_first=True)
df = df.applymap(lambda x: 1 if x is True else 0 if x is False else x)
df.head()

df.select_dtypes(['Int64', 'Float64']).corr()

train_df = pd.read_csv('/kaggle/input/social-media-usage-and-emotional-well-being/train.csv')
test_df = pd.read_csv('/kaggle/input/social-media-usage-and-emotional-well-being/test.csv')

def count_outliers(df):
    numeric_cols = df.select_dtypes(include=[np.number]).columns
    outliers = {}
    for col in numeric_cols:
        upper_limit = df[col].quantile(0.99)
        outliers[col] = (df[col] > upper_limit).sum()
    return outliers

outliers_count_train = count_outliers(train_df.drop(columns=['User_ID']))
outliers_count_test = count_outliers(test_df.drop(columns=['User_ID']))

print("Train outliers count based on the 99th percentile:")
for col, count in outliers_count_train.items():
    print(f"{col}: {count}")

print("Test outliers count based on the 99th percentile:")
for col, count in outliers_count_test.items():
    print(f"{col}: {count}")

def remove_outliers(df):
    numeric_cols = df.select_dtypes(include=[np.number]).columns
    for col in numeric_cols:
        upper_limit = df[col].quantile(0.99)
        df = df[df[col] <= upper_limit]
    return df

df_cleaned_train = remove_outliers(train_df)
df_cleaned_test = remove_outliers(test_df)

print("Original dataset shape:", train_df.shape)
print("Cleaned dataset shape:", df_cleaned_train.shape)

train_df = df_cleaned_train
test_df = df_cleaned_test

wrong_values = ['Male', 'Female', 'Non-binary', 'iste mevcut veri kumesini 1000 satira tamamliyorum:']
train_df = train_df[~train_df['Age'].isin(wrong_values)]
train_df['Age'] = train_df['Age'].astype('Int64')

test_df = test_df[~test_df['Age'].isin(wrong_values)]
test_df['Age'] = test_df['Age'].astype('Int64')

train_df.fillna(method='ffill', inplace=True)
test_df.fillna(method='ffill', inplace=True)

X_train = train_df.drop('Dominant_Emotion', axis=1)
y_train = train_df['Dominant_Emotion']

X_test = test_df.drop('Dominant_Emotion', axis=1)
y_test = test_df['Dominant_Emotion']

X_train = pd.get_dummies(X_train, drop_first=True)
X_test = pd.get_dummies(X_test, drop_first=True)

# Align test columns with the training columns, filling missing dummies with 0
X_test = X_test.reindex(columns=X_train.columns, fill_value=0)

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
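# SMOTE is imported above but never applied; a minimal sketch of how it could
# balance the emotion classes before training. This is an assumption, not part
# of the original pipeline; the resampled arrays get new names so the models
# fitted below are unchanged.
smote = SMOTE(random_state=42)
X_train_balanced, y_train_balanced = smote.fit_resample(X_train_scaled, y_train)
print("Class counts after SMOTE:", pd.Series(y_train_balanced).value_counts().to_dict())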
rf_classifier = RandomForestClassifier(n_estimators=100, random_state=42)
rf_classifier.fit(X_train_scaled, y_train)
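# Not in the original script: a quick hold-out evaluation of the all-features
# model, as a baseline to compare the top-10-features model against below.
y_pred_full = rf_classifier.predict(X_test_scaled)
print(f"Accuracy with All Features: {accuracy_score(y_test, y_pred_full):.2f}")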
importances = rf_classifier.feature_importances_
feature_names = X_train.columns
feature_importances = pd.DataFrame({'Feature': feature_names, 'Importance': importances})

feature_importances = feature_importances.sort_values(by='Importance', ascending=False)
top_10_features = feature_importances['Feature'].head(10).values

print("Top 10 Important Features:")
print(feature_importances.head(10))

plt.figure(figsize=(10, 6))
plt.title("Top 10 Feature Importances")
plt.barh(feature_importances.head(10)['Feature'], feature_importances.head(10)['Importance'],
         color='b', align='center')
plt.gca().invert_yaxis()
plt.xlabel('Relative Importance')
plt.show()

X_train_top10 = X_train[top_10_features]
X_test_top10 = X_test[top_10_features]

X_train_top10_scaled = scaler.fit_transform(X_train_top10)
X_test_top10_scaled = scaler.transform(X_test_top10)

rf_classifier_top10 = RandomForestClassifier(n_estimators=100, random_state=42)
rf_classifier_top10.fit(X_train_top10_scaled, y_train)

y_pred_top10 = rf_classifier_top10.predict(X_test_top10_scaled)

accuracy_top10 = accuracy_score(y_test, y_pred_top10)
print(f"\nAccuracy with Top 10 Features: {accuracy_top10:.2f}")
print("Classification Report with Top 10 Features:")
print(classification_report(y_test, y_pred_top10))
print("Confusion Matrix with Top 10 Features:")
print(confusion_matrix(y_test, y_pred_top10))
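# GridSearchCV is imported above but never used; a minimal tuning sketch for
# the top-10-features model. The parameter grid here is a hypothetical example,
# not taken from the original script.
param_grid = {
    'n_estimators': [100, 200],
    'max_depth': [None, 10, 20],
}
grid_search = GridSearchCV(RandomForestClassifier(random_state=42),
                           param_grid, cv=5, scoring='accuracy')
grid_search.fit(X_train_top10_scaled, y_train)
print("Best parameters:", grid_search.best_params_)
print(f"Best CV accuracy: {grid_search.best_score_:.2f}")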