# Install packages that aren't preinstalled in the notebook environment.
!pip install pingouin
!pip install simpy

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pingouin as pg
import simpy
import random
import joblib
from scipy import stats
from scipy.stats import shapiro, f_oneway, pearsonr, chi2_contingency, ttest_ind
from scipy.fft import fft
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score, confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, VotingRegressor
from sklearn.utils import resample
from sklearn.impute import SimpleImputer
from sklearn.inspection import PartialDependenceDisplay
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tsa.stattools import ccf
from pandas.plotting import autocorrelation_plot, lag_plot
import warnings

# Suppress noisy warning categories so notebook output stays readable.
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=ImportWarning)
warnings.filterwarnings('ignore', category=SyntaxWarning)
warnings.filterwarnings('ignore', category=PendingDeprecationWarning)
warnings.filterwarnings('ignore', category=ResourceWarning)

# Global plotting defaults.
sns.set(style='whitegrid')
plt.rcParams['figure.figsize'] = (12, 8)

# Load the dataset.
data = pd.read_csv('/content/Facebook Metrics of Cosmetic Brand.csv')

print("Sample of dataset:")
display(data.head())

print(f"Dataset shape: {data.shape}")
print(f"Columns in the dataset: {data.columns.tolist()}")

print("\nDataset Information:") |
|
data.info() |
|
|
|
print("\nSummary Statistics:") |
|
display(data.describe()) |
|
|
|
print("\nSummary Statistics for Categorical Columns:") |
|
categorical_columns = data.select_dtypes(include=['object']).columns |
|
display(data[categorical_columns].describe()) |
|
|
|
print("\nSummary Statistics for Cetegorical Columns:") |
|
categorical_columns = data.select_dtypes(include=['object']).columns |
|
display(data[categorical_columns].describe()) |
|
|
|
duplicate_rows = data.duplicated().sum()
print(f"\nNumber of duplicate rows: {duplicate_rows}")

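# If any duplicates were found, a minimal sketch (an optional addition, not
# part of the original analysis) of how they could be dropped before proceeding:
if duplicate_rows > 0:
    data = data.drop_duplicates().reset_index(drop=True)
    print(f"Shape after dropping duplicate rows: {data.shape}")
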
print("\nUnique values in each column:") |
|
for column in data.columns: |
|
unique_values = data[column].nunique() |
|
print(f"{column}: {unique_values} unique values") |
|
|
|
print("\nDistribution of uniquye values in categorical columns:") |
|
for column in categorical_columns: |
|
value_counts = data[column].value_counts() |
|
print(f"\n{column} distribution") |
|
print(value_counts) |
|
|
|
print("\nSkewness of numerical columns:") |
|
numerical_columns = data.select_dtypes(include=[np.number]).columns |
|
skewness = data[numerical_columns].skew() |
|
print(skewness) |
|
|
|
print("\nKutosis of numerical columns:") |
|
kurtosis = data[numerical_columns].kurtosis() |
|
print(kurtosis) |
|
|
|
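# Engagement counts are typically heavy-tailed, so large positive skew is
# expected here. An illustrative sketch (not part of the original analysis)
# of a log1p transform that could tame strongly right-skewed columns:
skewed_columns = skewness[skewness > 1].index
print("\nSkewness after log1p transform (illustrative):")
print(data[skewed_columns].apply(np.log1p).skew())
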
print("\nPairwise correlatoin of numerical features:") |
|
pairwise_corr = data[numerical_columns].corr() |
|
display(pairwise_corr) |
|
|
|
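# A heatmap makes the correlation structure easier to scan than the raw table
# (an illustrative addition, using the notebook's existing seaborn styling):
plt.figure(figsize=(12, 8))
sns.heatmap(pairwise_corr, annot=True, fmt='.2f', cmap='coolwarm', center=0)
plt.title('Correlation Matrix of Numerical Features')
plt.show()
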
print("\nHighly correlated feature pairs:") |
|
threshold = 0.8 |
|
high_corr_pairs = [(i, j, pairwise_corr.loc[i, j]) for i in pairwise_corr.columns for j in pairwise_corr.columns if i != j and abs(pairwise_corr.loc[i, j]) > threshold] |
|
for i, j, corr_value in high_corr_pairs: |
|
print(f"Correlation between {i} and {j}: {corr_value:.2f}") |
|
|
|
print("\nVariance Inflation Factor (VIF) analysis for multicollinearity:") |
|
vif_data = pd.DataFrame() |
|
vif_data["features"] = numerical_columns |
|
vif_data["VIF"] = [variance_inflation_factor(data[numerical_columns].fillna(0).values, i) for i in range(len(numerical_columns))] |
|
display(vif_data) |
|
|
|
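# Caveat: statsmodels' variance_inflation_factor expects the design matrix to
# include an intercept; without one, VIFs tend to be overstated. A minimal
# sketch of the conventional variant (sm.add_constant is an addition, not
# used elsewhere in this notebook):
import statsmodels.api as sm

X_const = sm.add_constant(data[numerical_columns].fillna(0))
vif_with_intercept = pd.Series(
    [variance_inflation_factor(X_const.values, i) for i in range(1, X_const.shape[1])],
    index=numerical_columns,
)
print("\nVIF with an intercept term (illustrative):")
print(vif_with_intercept)
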
print("\nShapiro-Wilk test for normality of numerical columns:") |
|
for col in numerical_columns: |
|
stat, p = shapiro(data[col].dropna()) |
|
print(f"Shapiro-Wilk test for {col}: Statistics={stat:.3f}, p={p:.3f}") |
|
if p > 0.05: |
|
print(f"The {col} distribution looks normal (fail to reject H0)\n") |
|
else: |
|
print(f"The {col} distribution does not look normal (reject H0)\n") |
|
|
|
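# Shapiro-Wilk p-values are sensitive to sample size, so a visual check is a
# useful complement. An illustrative sketch (not in the original analysis)
# using Q-Q plots via scipy's probplot:
for col in numerical_columns:
    stats.probplot(data[col].dropna(), dist='norm', plot=plt)
    plt.title(f'Q-Q Plot of {col}')
    plt.show()
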
print("\nANOVA test for interaction between categorical and numerical features:") |
|
for cat_col in categorical_columns: |
|
for num_col in numerical_columns: |
|
groups = [data[num_col][data[cat_col] == cat] |
|
for cat in data[cat_col].unique()] |
|
f_stat, p_val = f_oneway(*groups) |
|
print(f"ANOVA test for interaction between {cat_col} and {num_col}: F-statistic={f_stat:.3f}, p-value={p_val:.3f}") |
|
if p_val < 0.05: |
|
print(f"Significant interaction detected between {cat_col} and {num_col}\n") |
|
else: |
|
print(f"No significant interaction detected between {cat_col} and {num_col}") |
|
|
|
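# One-way ANOVA assumes approximately normal, equal-variance groups, which the
# Shapiro-Wilk results above often contradict for engagement counts. A minimal
# sketch of a non-parametric alternative (scipy.stats.kruskal; an addition,
# not part of the original analysis):
from scipy.stats import kruskal

for cat_col in categorical_columns:
    for num_col in numerical_columns:
        groups = [data[num_col][data[cat_col] == cat].dropna() for cat in data[cat_col].unique()]
        h_stat, p_val = kruskal(*groups)
        print(f"Kruskal-Wallis for {num_col} across {cat_col}: H={h_stat:.3f}, p={p_val:.3f}")
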
print("\nMissing Values in Each Column:") |
|
missing_values = data.isnull().sum() |
|
missing_percentage = data.isnull().mean() * 100 |
|
missing_data = pd.DataFrame({ |
|
'Missing Values': missing_values, |
|
'Percentage': missing_percentage |
|
}) |
|
display(missing_data) |
|
|
|
plt.figure(figsize=(12, 8))
sns.heatmap(data.isnull(), cbar=False, cmap='viridis')
plt.title('Missing Data Heatmap')
plt.show()

missing_threshold = 30  # percent; renamed from `threshold` to avoid clashing with the correlation threshold above
columns_with_missing_above_threshold = missing_data[missing_data['Percentage'] > missing_threshold].index.tolist()
print(f"\nColumns with more than {missing_threshold}% missing values:")
print(columns_with_missing_above_threshold)

data_cleaned = data.drop(columns=columns_with_missing_above_threshold)
print(f"\nShape of data after dropping columns with > {missing_threshold}% missing values: {data_cleaned.shape}")

# Impute numerical columns with the median, categorical columns with the mode.
# (Assigning back avoids pandas' chained-assignment pitfall with inplace fillna.)
numerical_columns = data_cleaned.select_dtypes(include=[np.number]).columns
data_cleaned[numerical_columns] = data_cleaned[numerical_columns].fillna(data_cleaned[numerical_columns].median())

categorical_columns = data_cleaned.select_dtypes(include=['object']).columns
for column in categorical_columns:
    data_cleaned[column] = data_cleaned[column].fillna(data_cleaned[column].mode()[0])

print("\nMissing Values After Imputation:")
display(data_cleaned.isnull().sum())

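# SimpleImputer is imported above but never used; for reference, a sketch of
# the equivalent imputation with scikit-learn (illustrative only, assuming the
# same median/mode strategy as the manual fillna above):
num_imputer = SimpleImputer(strategy='median')
cat_imputer = SimpleImputer(strategy='most_frequent')
data_cleaned[numerical_columns] = num_imputer.fit_transform(data_cleaned[numerical_columns])
data_cleaned[categorical_columns] = cat_imputer.fit_transform(data_cleaned[categorical_columns])
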
print("\nDistribution of 'Type' column:") |
|
type_counts = data['Type'].value_counts() |
|
display(type_counts) |
|
|
|
plt.figure(figsize=(10, 6))
sns.countplot(x='Type', data=data, palette='Set3')
plt.title('Distribution of Post Types')
plt.xlabel('Type of Post')
plt.ylabel('Count')
plt.show()

print("\nDistribution of 'Category' column:") |
|
category_counts = data['Category'].value_counts |
|
display(category_counts) |
|
|
|
plt.figure(figsize=(10, 6))
sns.countplot(x='Category', data=data, palette='Set2')
plt.title('Distribution of Post Categories')
plt.xlabel('Category of Post')
plt.ylabel('Count')
plt.show()

print("\nDistribution of 'Paid' column:") |
|
paid_counts = data['Paid'].value_counts() |
|
display(paid_counts) |
|
|
|
plt.figure(figsize=(10, 6))
sns.countplot(x='Paid', data=data, palette='Set1')
plt.title('Distribution of Paid vs Non-Paid Posts')
plt.xlabel('Paid (1 = Yes, 0 = No)')
plt.ylabel('Count')
plt.show()

print("\nCross-tabulation of 'Type' and 'Paid' columns:") |
|
type_paid_crosstab = pd.crosstab(data['Type'], data['Paid']) |
|
display(type_paid_crosstab) |
|
|
|
type_paid_crosstab.plot(kind='bar', stacked=True, colormap='coolwarm')
plt.title('Stacked Bar Plot of Post Type vs Paid Status')
plt.xlabel('Type of Post')
plt.ylabel('Count')
plt.legend(title='Paid', loc='upper right')
plt.show()

print("\nCross-tabulation of 'Category' and 'Paid' columns:") |
|
category_paid_crosstab = pd.crosstab(data['Category'], data['Paid']) |
|
display(category_paid_crosstab) |
|
|
|
category_paid_crosstab.plot(kind='bar', stacked=True, colormap='viridis')
plt.title('Stacked Bar Plot of Post Category vs Paid Status')
plt.xlabel('Category of Post')
plt.ylabel('Count')
plt.legend(title='Paid', loc='upper right')
plt.show()

numerical_metrics = ['like', 'comment', 'share']

for metric in numerical_metrics:
    plt.figure(figsize=(18, 6))

    plt.subplot(1, 3, 1)
    sns.boxplot(x='Type', y=metric, data=data, palette='Set3')
    plt.title(f'Distribution of {metric} by Post Type')

    plt.subplot(1, 3, 2)
    sns.boxplot(x='Category', y=metric, data=data, palette='Set2')
    plt.title(f'Distribution of {metric} by Post Category')

    plt.subplot(1, 3, 3)
    sns.boxplot(x='Paid', y=metric, data=data, palette='Set1')
    plt.title(f'Distribution of {metric} by Paid Status')

    plt.tight_layout()
    plt.show()

for metric in numerical_metrics:
    plt.figure(figsize=(18, 6))

    plt.subplot(1, 3, 1)
    sns.violinplot(x='Type', y=metric, data=data, palette='coolwarm', inner='quartile')
    plt.title(f'Violin Plot of {metric} by Post Type')

    plt.subplot(1, 3, 2)
    sns.violinplot(x='Category', y=metric, data=data, palette='viridis', inner='quartile')
    plt.title(f'Violin Plot of {metric} by Post Category')

    plt.subplot(1, 3, 3)
    sns.violinplot(x='Paid', y=metric, data=data, palette='magma', inner='quartile')
    plt.title(f'Violin Plot of {metric} by Paid Status')

    plt.tight_layout()
    plt.show()

categorical_pairs = [('Type', 'Paid'), ('Category', 'Paid'), ('Type', 'Category')]
print("\nChi-Square Test for Independence between Categorical Variables:")
for pair in categorical_pairs:
    contingency_table = pd.crosstab(data[pair[0]], data[pair[1]])
    chi2, p, dof, expected = chi2_contingency(contingency_table)

    print(f"Chi-Square Test between {pair[0]} and {pair[1]}:")
    print(f"Chi2 = {chi2:.2f}, p-value = {p:.3f}")
    if p < 0.05:
        print(f"There is a significant association between {pair[0]} and {pair[1]}.\n")
    else:
        print(f"No significant association between {pair[0]} and {pair[1]}.\n")

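# The chi-square p-value says nothing about the strength of association; a
# minimal sketch of Cramér's V as an effect size, computed by hand from the
# same contingency tables (an illustrative addition, not in the original):
for pair in categorical_pairs:
    contingency_table = pd.crosstab(data[pair[0]], data[pair[1]])
    chi2_stat, _, _, _ = chi2_contingency(contingency_table)
    n = contingency_table.to_numpy().sum()
    min_dim = min(contingency_table.shape) - 1
    cramers_v = np.sqrt(chi2_stat / (n * min_dim))
    print(f"Cramér's V for {pair[0]} vs {pair[1]}: {cramers_v:.3f}")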