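# Exploratory data analysis and statistical testing of the "Facebook Metrics of Cosmetic Brand" dataset.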
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier

sns.set(style="whitegrid")
plt.rcParams['figure.figsize'] = (10, 6)

data = pd.read_csv('/content/Facebook Metrics of Cosmetic Brand.csv')
data.head()

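# Install packages not bundled with the (assumed) Colab runtime: pingouin for statistics, simpy for discrete-event simulation.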
!pip install pingouin
!pip install simpy

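# Consolidated imports for the statistical tests, time-series tools, and models used in this notebook.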
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pingouin as pg
import simpy
import random
import joblib
from scipy import stats
from scipy.stats import shapiro, f_oneway, pearsonr, chi2_contingency, ttest_ind
from scipy.fft import fft
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score, confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, VotingRegressor
from sklearn.utils import resample
from sklearn.impute import SimpleImputer
from sklearn.inspection import PartialDependenceDisplay
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tsa.stattools import ccf

from pandas.plotting import autocorrelation_plot, lag_plot
import warnings

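# Suppress common warning categories to keep the notebook output readable.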
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=ImportWarning)
warnings.filterwarnings('ignore', category=SyntaxWarning)
warnings.filterwarnings('ignore', category=PendingDeprecationWarning)
warnings.filterwarnings('ignore', category=ResourceWarning)

sns.set(style='whitegrid')
plt.rcParams['figure.figsize'] = (12, 8)

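# Load the dataset and inspect its shape, dtypes, and summary statistics.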
data = pd.read_csv('/content/Facebook Metrics of Cosmetic Brand.csv')

print("Sample of dataset:")
display(data.head())

print(f"Dataset shape: {data.shape}")

print(f"Columns in the dataset: {data.columns.tolist()}")

print("\nDataset Information:")
data.info()

print("\nSummary Statistics:")
display(data.describe())

print("\nSummary Statistics for Categorical Columns:")
categorical_columns = data.select_dtypes(include=['object']).columns
display(data[categorical_columns].describe())

print("\nSummary Statistics for Cetegorical Columns:")
categorical_columns = data.select_dtypes(include=['object']).columns
display(data[categorical_columns].describe())

duplicate_rows = data.duplicated().sum()
print(f"\nNumber of duplicate rows: {duplicate_rows}")

print("\nUnique values in each column:")
for column in data.columns:
  unique_values = data[column].nunique()
  print(f"{column}: {unique_values} unique values")

print("\nDistribution of uniquye values in categorical columns:")
for column in categorical_columns:
  value_counts = data[column].value_counts()
  print(f"\n{column} distribution")
  print(value_counts)

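# Skewness measures asymmetry; kurtosis measures tail heaviness relative to a normal distribution.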
print("\nSkewness of numerical columns:")
numerical_columns = data.select_dtypes(include=[np.number]).columns
skewness = data[numerical_columns].skew()
print(skewness)

print("\nKutosis of numerical columns:")
kurtosis = data[numerical_columns].kurtosis()
print(kurtosis)

print("\nPairwise correlatoin of numerical features:")
pairwise_corr = data[numerical_columns].corr()
display(pairwise_corr)

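# Report each highly correlated pair once (upper triangle of the correlation matrix).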
print("\nHighly correlated feature pairs:")
threshold = 0.8
corr_cols = pairwise_corr.columns
high_corr_pairs = [(corr_cols[i], corr_cols[j], pairwise_corr.iloc[i, j])
                   for i in range(len(corr_cols)) for j in range(i + 1, len(corr_cols))
                   if abs(pairwise_corr.iloc[i, j]) > threshold]
for col_a, col_b, corr_value in high_corr_pairs:
  print(f"Correlation between {col_a} and {col_b}: {corr_value:.2f}")

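# Rule of thumb: VIF above roughly 5-10 signals problematic multicollinearity.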
print("\nVariance Inflation Factor (VIF) analysis for multicollinearity:")
vif_data = pd.DataFrame()
vif_data["features"] = numerical_columns
vif_data["VIF"] = [variance_inflation_factor(data[numerical_columns].fillna(0).values, i)
                   for i in range(len(numerical_columns))]
display(vif_data)

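# Shapiro-Wilk H0: the sample is drawn from a normal distribution.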
print("\nShapiro-Wilk test for normality of numerical columns:")
for col in numerical_columns:
  stat, p = shapiro(data[col].dropna())
  print(f"Shapiro-Wilk test for {col}: Statistics={stat:.3f}, p={p:.3f}")
  if p > 0.05:
    print(f"The {col} distribution looks normal (fail to reject H0)\n")
  else:
    print(f"The {col} distribution does not look normal (reject H0)\n")

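# One-way ANOVA: test whether the mean of each numerical feature differs across categorical groups.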
print("\nANOVA test for interaction between categorical and numerical features:")
for cat_col in categorical_columns:
  for num_col in numerical_columns:
    groups = [data[num_col][data[cat_col] == cat]
  for cat in data[cat_col].unique()]
  f_stat, p_val = f_oneway(*groups)
  print(f"ANOVA test for interaction between {cat_col} and {num_col}: F-statistic={f_stat:.3f}, p-value={p_val:.3f}")
  if p_val < 0.05:
    print(f"Significant interaction detected between {cat_col} and {num_col}\n")
  else:
    print(f"No significant interaction detected between {cat_col} and {num_col}")

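# Quantify missingness per column, as absolute counts and as a percentage of rows.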
print("\nMissing Values in Each Column:")
missing_values = data.isnull().sum()
missing_percentage = data.isnull().mean() * 100
missing_data = pd.DataFrame({
    'Missing Values': missing_values,
    'Percentage': missing_percentage
})
display(missing_data)

plt.figure(figsize=(12, 8))
sns.heatmap(data.isnull(), cbar=False, cmap='viridis')
plt.title('Missing Data Heatmap')
plt.show()

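# Drop columns whose share of missing values exceeds the threshold (here 30%).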
threshold = 30
columns_with_missing_above_threshold = missing_data[missing_data['Percentage'] > threshold].index.tolist()
print(f"\nColumns with more than {threshold}% missing values:")
print(columns_with_missing_above_threshold)

data_cleaned = data.drop(columns=columns_with_missing_above_threshold)
print(f"\nShape of data after dropping columns with > {threshold}% missing values: {data_cleaned.shape}")

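# Impute remaining gaps: median for numerical columns (robust to outliers), mode for categorical ones.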
numerical_columns = data_cleaned.select_dtypes(include=[np.number]).columns
data_cleaned[numerical_columns] = data_cleaned[numerical_columns].fillna(data_cleaned[numerical_columns].median())

categorical_columns = data_cleaned.select_dtypes(include=['object']).columns
for column in categorical_columns:
  data_cleaned[column] = data_cleaned[column].fillna(data_cleaned[column].mode()[0])

print("\nMissing Values After Imputation:")
display(data_cleaned.isnull().sum())

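# Univariate distributions of the categorical features: post Type, Category, and Paid status.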
print("\nDistribution of 'Type' column:")
type_counts = data['Type'].value_counts()
display(type_counts)

plt.figure(figsize=(10, 6))
sns.countplot(x='Type', data=data, palette='Set3')
plt.title('Distribution of Post Types')
plt.xlabel('Type of Post')
plt.ylabel('Count')
plt.show()

print("\nDistribution of 'Category' column:")
category_counts = data['Category'].value_counts()
display(category_counts)

plt.figure(figsize=(10, 6))
sns.countplot(x='Category', data=data, palette='Set2')
plt.title('Distribution of Post Categories')
plt.xlabel('Category of Post')
plt.ylabel('Count')
plt.show()

print("\nDistribution of 'Paid' column:")
paid_counts = data['Paid'].value_counts()
display(paid_counts)

plt.figure(figsize=(10, 6))
sns.countplot(x='Paid', data=data, palette='Set1')
plt.title('Distribution of Paid vs Non-Paid Posts')
plt.xlabel('Paid (1 = Yes, 0 = No)')
plt.ylabel('Count')
plt.show()

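# Cross-tabulations: how paid promotion is distributed across post types and categories.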
print("\nCross-tabulation of 'Type' and 'Paid' columns:")
type_paid_crosstab = pd.crosstab(data['Type'], data['Paid'])
display(type_paid_crosstab)

type_paid_crosstab.plot(kind='bar', stacked=True, colormap='coolwarm')
plt.title('Stacked Bar Plot of Post Type vs Paid Status')
plt.xlabel('Type of Post')
plt.ylabel('Count')
plt.legend(title='Paid', loc='upper right')
plt.show()

print("\nCross-tabulation of 'Category' and 'Paid' columns:")
category_paid_crosstab = pd.crosstab(data['Category'], data['Paid'])
display(category_paid_crosstab)

category_paid_crosstab.plot(kind='bar', stacked=True, colormap='viridis')
plt.title('Stacked Bar Plot of Post Category vs Paid Status')
plt.xlabel('Category of Post')
plt.ylabel('Count')
plt.legend(title='Paid', loc='upper right')
plt.show()

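# Compare the core engagement metrics (likes, comments, shares) across each categorical dimension.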
numerical_metrics = ['like', 'comment', 'share']

for metric in numerical_metrics:
  plt.figure(figsize=(18, 6))
  plt.subplot(1, 3, 1)
  sns.boxplot(x='Type', y=metric, data=data, palette='Set3')
  plt.title(f'Distribution of {metric} by Post Type')

  plt.subplot(1, 3, 2)
  sns.boxplot(x='Category', y=metric, data=data, palette='Set2')
  plt.title(f'Distribution of {metric} by Post Category')

  plt.subplot(1, 3, 3)
  sns.boxplot(x='Paid', y=metric, data=data, palette='Set1')
  plt.title(f'Distribution of {metric} by Paid Status')

  plt.tight_layout()
  plt.show()

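# Violin plots show the full distribution shape, with quartiles marked inside.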
for metric in numerical_metrics:
  plt.figure(figsize=(18, 6))
  plt.subplot(1, 3, 1)
  sns.violinplot(x='Type', y=metric, data=data, palette='coolwarm', inner='quartile')
  plt.title(f'Violin Plot of {metric} by Post Type')
  plt.subplot(1, 3, 2)
  sns.violinplot(x='Category', y=metric, data=data, palette='viridis', inner='quartile')
  plt.title(f'Violin Plot of {metric} by Post Category')

  plt.subplot(1, 3, 3)
  sns.violinplot(x='Paid', y=metric, data=data, palette='magma', inner='quartile')
  plt.title(f'Violin Plot of {metric} by Paid Status')

  plt.tight_layout()
  plt.show()

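# Chi-square test of independence; H0: the two categorical variables are independent.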
categorical_pairs = [('Type', 'Paid'), ('Category', 'Paid'), ('Type', 'Category')]
print("\nChi-Square Test for Independence between Categorical Variables:")
for pair in categorical_pairs:
  contingency_table = pd.crosstab(data[pair[0]], data[pair[1]])
  chi2, p, dof, expected = chi2_contingency(contingency_table)

  print(f"Chi-Square Test between {pair[0]} and {pair[1]}:")
  print(f"Chi2 = {chi2:.2f}, p-value = {p:.3f}")
  if p < 0.05:
    print(f"There is a significant association between {pair[0]} and {pair[1]}.\n")
  else:
    print(f"No significant association between {pair[0]} and {pair[1]}.\n")