# -*- coding: utf-8 -*-
"""suture.195

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1IXS6Im1Ap41KG6o9EdDvJUW9N47b5Hp5
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from imblearn.over_sampling import SMOTE
import warnings

warnings.filterwarnings('ignore')
plt.style.use('ggplot')

df_train = pd.read_csv('/kaggle/input/social-media-usage-and-emotional-well-being/train.csv')
df_train.info()

df_train['Age'].value_counts()

# The Age column contains a few stray string values (gender labels and a
# leaked Turkish sentence, roughly "here I am completing the existing
# dataset to 1000 rows:"). Drop those rows before casting to a nullable
# integer type.
wrong_values = ['Male', 'Female', 'Non-binary', 'iste mevcut veri kumesini 1000 satira tamamliyorum:']
df_train = df_train[~df_train['Age'].isin(wrong_values)]
df_train['Age'] = df_train['Age'].astype('Int64')
df_train['Age'].value_counts()

print("The shape of the train dataset is", df_train.shape)

gender_cols = df_train['Gender'].value_counts().reset_index()
gender_cols.columns = ['Gender', 'Count']
print(gender_cols)

fig, ax = plt.subplots()
ax.bar(gender_cols['Gender'], gender_cols['Count'],
       color=['pink', 'skyblue', 'grey'], width=0.5)
ax.set_title("Distinct Count Distribution of Gender")
ax.set_xlabel("Gender")
ax.set_ylabel("Count")
plt.show()

continuous_vars = ['Age', 'Daily_Usage_Time (minutes)', 'Posts_Per_Day',
                   'Likes_Received_Per_Day', 'Comments_Received_Per_Day',
                   'Messages_Sent_Per_Day']

for var in continuous_vars:
    plt.figure(figsize=(10, 6))
    sns.histplot(df_train[var].dropna(), kde=True, color='skyblue')
    plt.title(f'Histogram of {var}')
    plt.xlabel(var)
    plt.ylabel('Frequency')
    plt.grid(True)
    plt.show()

for var in continuous_vars:
    plt.figure(figsize=(10, 6))
    sns.boxplot(data=df_train, x='Dominant_Emotion', y=var, palette='pastel')
    plt.title(f'Box Plot of {var} by Dominant_Emotion')
    plt.xlabel('Dominant_Emotion')
    plt.ylabel(var)
    plt.grid(True)
    plt.show()

for var in continuous_vars:
    plt.figure(figsize=(10, 6))
    sns.violinplot(data=df_train, x='Dominant_Emotion', y=var, palette='pastel', inner='quartile')
    plt.title(f'Violin Plot of {var} by Dominant_Emotion')
    plt.xlabel('Dominant_Emotion')
    plt.ylabel(var)
    plt.grid(True)
    plt.show()

categorical_vars = ['Gender', 'Platform']

for var in categorical_vars:
    plt.figure(figsize=(10, 6))
    ax = sns.countplot(data=df_train, x=var, palette='pastel')
    plt.title(f'Count Plot of {var}')
    plt.xlabel(var)
    plt.ylabel('Count')
    plt.grid(True)
    for container in ax.containers:
        ax.bar_label(container, fmt='%d')
    plt.show()

plt.figure(figsize=(10, 6))
ax = sns.countplot(data=df_train, x='Dominant_Emotion', palette='pastel')
plt.title('Count Plot of Dominant Emotion')
plt.xlabel('Dominant_Emotion')
plt.ylabel('Count')
plt.grid(True)
for container in ax.containers:
    ax.bar_label(container, fmt='%d')
plt.show()

sns.pairplot(df_train[continuous_vars + ['Dominant_Emotion']], hue='Dominant_Emotion',
             palette='pastel', diag_kind='kde')
plt.show()

for var in categorical_vars:
    plt.figure(figsize=(10, 6))
    sns.countplot(data=df_train, x=var, hue='Dominant_Emotion', palette='pastel')
    plt.title(f'Count plot of {var} by Dominant_Emotion')
    plt.xlabel(var)
    plt.ylabel('Count')
    plt.grid(True)
    plt.show()
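# A compact numeric companion to the box/violin plots above: per-emotion
# means of the continuous variables. This is a supplementary sketch, not
# part of the original notebook; it only uses columns already defined here.
emotion_means = df_train.groupby('Dominant_Emotion')[continuous_vars].mean()
print(emotion_means.round(2))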
cg = sns.clustermap(df_train[continuous_vars].corr(), annot=True, cmap='coolwarm',
                    linewidths=0.5, figsize=(10, 10))
cg.figure.suptitle('Clustered Correlation Matrix Heatmap')
plt.show()

df = pd.get_dummies(df_train, columns=['Gender', 'Platform'], drop_first=True)
df = df.applymap(lambda x: 1 if x is True else 0 if x is False else x)
df.head()

df.select_dtypes(['Int64', 'Float64']).corr()

train_df = pd.read_csv('/kaggle/input/social-media-usage-and-emotional-well-being/train.csv')
test_df = pd.read_csv('/kaggle/input/social-media-usage-and-emotional-well-being/test.csv')

def count_outliers(df):
    """Count values above the 99th percentile in each numeric column."""
    numeric_cols = df.select_dtypes(include=[np.number]).columns
    outliers = {}
    for col in numeric_cols:
        upper_limit = df[col].quantile(0.99)
        outliers[col] = (df[col] > upper_limit).sum()
    return outliers

outliers_count_train = count_outliers(train_df.drop(columns=['User_ID']))
outliers_count_test = count_outliers(test_df.drop(columns=['User_ID']))

print("Train outliers count based on the 99th percentile:")
for col, count in outliers_count_train.items():
    print(f"{col}: {count}")

print("Test outliers count based on the 99th percentile:")
for col, count in outliers_count_test.items():
    print(f"{col}: {count}")

def remove_outliers(df):
    """Drop rows above the 99th percentile in any numeric column.

    User_ID is excluded, matching its exclusion in count_outliers above,
    so rows are never dropped based on an identifier.
    """
    numeric_cols = df.select_dtypes(include=[np.number]).columns.drop('User_ID', errors='ignore')
    for col in numeric_cols:
        upper_limit = df[col].quantile(0.99)
        df = df[df[col] <= upper_limit]
    return df

df_cleaned_train = remove_outliers(train_df)
df_cleaned_test = remove_outliers(test_df)

print("Original dataset shape:", train_df.shape)
print("Cleaned dataset shape:", df_cleaned_train.shape)

train_df = df_cleaned_train
test_df = df_cleaned_test

# Repeat the Age cleanup on the freshly loaded train/test copies.
wrong_values = ['Male', 'Female', 'Non-binary', 'iste mevcut veri kumesini 1000 satira tamamliyorum:']
train_df = train_df[~train_df['Age'].isin(wrong_values)]
train_df['Age'] = train_df['Age'].astype('Int64')
test_df = test_df[~test_df['Age'].isin(wrong_values)]
test_df['Age'] = test_df['Age'].astype('Int64')

train_df = train_df.ffill()
test_df = test_df.ffill()

X_train = train_df.drop('Dominant_Emotion', axis=1)
y_train = train_df['Dominant_Emotion']
X_test = test_df.drop('Dominant_Emotion', axis=1)
y_test = test_df['Dominant_Emotion']

X_train = pd.get_dummies(X_train, drop_first=True)
X_test = pd.get_dummies(X_test, drop_first=True)
X_test = X_test.reindex(columns=X_train.columns, fill_value=0)

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

rf_classifier = RandomForestClassifier(n_estimators=100, random_state=42)
rf_classifier.fit(X_train_scaled, y_train)

importances = rf_classifier.feature_importances_
feature_names = X_train.columns
feature_importances = pd.DataFrame({'Feature': feature_names, 'Importance': importances})
feature_importances = feature_importances.sort_values(by='Importance', ascending=False)
top_10_features = feature_importances['Feature'].head(10).values

print("Top 10 Important Features:")
print(feature_importances.head(10))

plt.figure(figsize=(10, 6))
plt.title("Top 10 Feature Importances")
plt.barh(feature_importances.head(10)['Feature'],
         feature_importances.head(10)['Importance'], color='b', align='center')
plt.gca().invert_yaxis()
plt.xlabel('Relative Importance')
plt.show()

X_train_top10 = X_train[top_10_features]
X_test_top10 = X_test[top_10_features]
X_train_top10_scaled = scaler.fit_transform(X_train_top10)
X_test_top10_scaled = scaler.transform(X_test_top10)

rf_classifier_top10 = RandomForestClassifier(n_estimators=100, random_state=42)
rf_classifier_top10.fit(X_train_top10_scaled, y_train)
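# GridSearchCV is imported at the top of this script but never used.
# Below is an optional, minimal tuning sketch for the top-10-feature model;
# the parameter grid values are illustrative assumptions, not part of the
# original pipeline, and the fitted rf_classifier_top10 above is left as-is.
param_grid = {
    'n_estimators': [100, 200],    # assumed values for illustration
    'max_depth': [None, 10, 20],   # assumed values for illustration
}
grid_search = GridSearchCV(
    RandomForestClassifier(random_state=42),
    param_grid=param_grid,
    cv=5,
    scoring='accuracy',
    n_jobs=-1,
)
grid_search.fit(X_train_top10_scaled, y_train)
print("Best parameters found by GridSearchCV:", grid_search.best_params_)
print(f"Best cross-validated accuracy: {grid_search.best_score_:.2f}")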
y_pred_top10 = rf_classifier_top10.predict(X_test_top10_scaled)
accuracy_top10 = accuracy_score(y_test, y_pred_top10)

print(f"\nAccuracy with Top 10 Features: {accuracy_top10:.2f}")
print("Classification Report with Top 10 Features:")
print(classification_report(y_test, y_pred_top10))
print("Confusion Matrix with Top 10 Features:")
print(confusion_matrix(y_test, y_pred_top10))
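# SMOTE is imported at the top but never applied. If the emotion classes
# turn out to be imbalanced (see the Dominant_Emotion count plot above),
# a minimal oversampling sketch would look like this. It is an illustrative
# addition, not part of the original pipeline, and resamples only the
# training split, never the test split.
smote = SMOTE(random_state=42)
X_train_resampled, y_train_resampled = smote.fit_resample(X_train_top10_scaled, y_train)
print("Class counts after SMOTE:", pd.Series(y_train_resampled).value_counts().to_dict())

rf_classifier_smote = RandomForestClassifier(n_estimators=100, random_state=42)
rf_classifier_smote.fit(X_train_resampled, y_train_resampled)
y_pred_smote = rf_classifier_smote.predict(X_test_top10_scaled)
print(f"Accuracy with SMOTE-resampled training data: {accuracy_score(y_test, y_pred_smote):.2f}")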