antitheft159 committed
Commit 6235903 · verified · 1 Parent(s): d393c66

Upload facebook_metrics_index.py

Files changed (1):
  facebook_metrics_index.py +287 -0
facebook_metrics_index.py ADDED
@@ -0,0 +1,287 @@
# -*- coding: utf-8 -*-
"""Facebook Metrics Index

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1oGTWA0ohvfmgTOB8V4K7xTlqjwuHHCJR
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier

sns.set(style="whitegrid")
plt.rcParams['figure.figsize'] = (10, 6)

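# Load the Facebook metrics dataset; the path assumes the CSV has been
# uploaded to Colab's /content directory.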
data = pd.read_csv('/content/Facebook Metrics of Cosmetic Brand.csv')
data.head()

# The `!pip` shell magics below only work inside a notebook; from a plain
# Python environment, install pingouin and simpy with pip directly.
!pip install pingouin
!pip install simpy

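# Full set of analysis imports; several of these (e.g. simpy, joblib, fft,
# ARIMA) are loaded for later experiments and are not used in this file.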
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pingouin as pg
import simpy
import random
import joblib
from scipy import stats
from scipy.stats import shapiro, f_oneway, pearsonr, chi2_contingency, ttest_ind
from scipy.fft import fft
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score, confusion_matrix, classification_report
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, VotingRegressor
from sklearn.utils import resample
from sklearn.impute import SimpleImputer
from sklearn.inspection import PartialDependenceDisplay
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tsa.stattools import ccf

from pandas.plotting import autocorrelation_plot, lag_plot
import warnings
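
# Suppress the warning categories that tend to clutter notebook output.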
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=ImportWarning)
warnings.filterwarnings('ignore', category=SyntaxWarning)
warnings.filterwarnings('ignore', category=PendingDeprecationWarning)
warnings.filterwarnings('ignore', category=ResourceWarning)

sns.set(style='whitegrid')
plt.rcParams['figure.figsize'] = (12, 8)

data = pd.read_csv('/content/Facebook Metrics of Cosmetic Brand.csv')

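# Exploratory overview: sample rows, shape, dtypes, and summary statistics.
# (display() is an IPython/Colab builtin; use print() when running as a script.)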
print("Sample of dataset:")
display(data.head())

print(f"Dataset shape: {data.shape}")

print(f"Columns in the dataset: {data.columns.tolist()}")

print("\nDataset Information:")
data.info()

print("\nSummary Statistics:")
display(data.describe())

print("\nSummary Statistics for Categorical Columns:")
categorical_columns = data.select_dtypes(include=['object']).columns
display(data[categorical_columns].describe())

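# Row-level duplicates and per-column cardinality.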
duplicate_rows = data.duplicated().sum()
print(f"\nNumber of duplicate rows: {duplicate_rows}")

print("\nUnique values in each column:")
for column in data.columns:
    unique_values = data[column].nunique()
    print(f"{column}: {unique_values} unique values")

print("\nDistribution of unique values in categorical columns:")
for column in categorical_columns:
    value_counts = data[column].value_counts()
    print(f"\n{column} distribution:")
    print(value_counts)

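# Distribution shape (skewness, kurtosis) and pairwise linear relationships
# among the numeric features.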
print("\nSkewness of numerical columns:")
numerical_columns = data.select_dtypes(include=[np.number]).columns
skewness = data[numerical_columns].skew()
print(skewness)

print("\nKurtosis of numerical columns:")
kurtosis = data[numerical_columns].kurtosis()
print(kurtosis)

print("\nPairwise correlation of numerical features:")
pairwise_corr = data[numerical_columns].corr()
display(pairwise_corr)

print("\nHighly correlated feature pairs:")
threshold = 0.8
# Scan only the upper triangle so each pair is reported once.
high_corr_pairs = [
    (i, j, pairwise_corr.loc[i, j])
    for idx, i in enumerate(pairwise_corr.columns)
    for j in pairwise_corr.columns[idx + 1:]
    if abs(pairwise_corr.loc[i, j]) > threshold
]
for i, j, corr_value in high_corr_pairs:
    print(f"Correlation between {i} and {j}: {corr_value:.2f}")

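# VIF above ~10 is a common rule of thumb for problematic multicollinearity.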
print("\nVariance Inflation Factor (VIF) analysis for multicollinearity:")
vif_data = pd.DataFrame()
vif_data["features"] = numerical_columns
vif_data["VIF"] = [
    variance_inflation_factor(data[numerical_columns].fillna(0).values, i)
    for i in range(len(numerical_columns))
]
display(vif_data)

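# Formal hypothesis tests: Shapiro-Wilk for normality of each numeric column,
# then one-way ANOVA of each numeric column across each categorical grouping.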
print("\nShapiro-Wilk test for normality of numerical columns:")
for col in numerical_columns:
    stat, p = shapiro(data[col].dropna())
    print(f"Shapiro-Wilk test for {col}: Statistics={stat:.3f}, p={p:.3f}")
    if p > 0.05:
        print(f"The {col} distribution looks normal (fail to reject H0)\n")
    else:
        print(f"The {col} distribution does not look normal (reject H0)\n")

print("\nANOVA test for association between categorical and numerical features:")
for cat_col in categorical_columns:
    for num_col in numerical_columns:
        groups = [data[num_col][data[cat_col] == cat]
                  for cat in data[cat_col].unique()]
        f_stat, p_val = f_oneway(*groups)
        print(f"ANOVA test for association between {cat_col} and {num_col}: F-statistic={f_stat:.3f}, p-value={p_val:.3f}")
        if p_val < 0.05:
            print(f"Significant association detected between {cat_col} and {num_col}\n")
        else:
            print(f"No significant association detected between {cat_col} and {num_col}\n")

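# Missing-data audit: per-column counts and percentages, plus a heatmap of
# null positions.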
print("\nMissing Values in Each Column:")
missing_values = data.isnull().sum()
missing_percentage = data.isnull().mean() * 100
missing_data = pd.DataFrame({
    'Missing Values': missing_values,
    'Percentage': missing_percentage
})
display(missing_data)

plt.figure(figsize=(12, 8))
sns.heatmap(data.isnull(), cbar=False, cmap='viridis')
plt.title('Missing Data Heatmap')
plt.show()

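# Drop columns that are mostly missing, then impute what remains:
# median for numeric columns, mode for categorical ones.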
threshold = 30
columns_with_missing_above_threshold = missing_data[missing_data['Percentage'] > threshold].index.tolist()
print(f"\nColumns with more than {threshold}% missing values:")
print(columns_with_missing_above_threshold)

data_cleaned = data.drop(columns=columns_with_missing_above_threshold)
print(f"\nShape of data after dropping columns with > {threshold}% missing values: {data_cleaned.shape}")

numerical_columns = data_cleaned.select_dtypes(include=[np.number]).columns
data_cleaned[numerical_columns] = data_cleaned[numerical_columns].fillna(data_cleaned[numerical_columns].median())

categorical_columns = data_cleaned.select_dtypes(include=['object']).columns
for column in categorical_columns:
    # Assign the result rather than fillna(inplace=True) on a column slice,
    # which pandas treats as chained assignment.
    data_cleaned[column] = data_cleaned[column].fillna(data_cleaned[column].mode()[0])

print("\nMissing Values After Imputation:")
display(data_cleaned.isnull().sum())

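# Distributions of the key categorical fields: Type, Category, and Paid.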
print("\nDistribution of 'Type' column:")
type_counts = data['Type'].value_counts()
display(type_counts)

plt.figure(figsize=(10, 6))
sns.countplot(x='Type', data=data, palette='Set3')
plt.title('Distribution of Post Types')
plt.xlabel('Type of Post')
plt.ylabel('Count')
plt.show()

print("\nDistribution of 'Category' column:")
category_counts = data['Category'].value_counts()
display(category_counts)

plt.figure(figsize=(10, 6))
sns.countplot(x='Category', data=data, palette='Set2')
plt.title('Distribution of Post Categories')
plt.xlabel('Category of Post')
plt.ylabel('Count')
plt.show()

print("\nDistribution of 'Paid' column:")
paid_counts = data['Paid'].value_counts()
display(paid_counts)

plt.figure(figsize=(10, 6))
sns.countplot(x='Paid', data=data, palette='Set1')
plt.title('Distribution of Paid vs Non-Paid Posts')
plt.xlabel('Paid (1 = Yes, 0 = No)')
plt.ylabel('Count')
plt.show()

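# Relationships between categorical fields via cross-tabulation and
# stacked bar plots.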
print("\nCross-tabulation of 'Type' and 'Paid' columns:")
type_paid_crosstab = pd.crosstab(data['Type'], data['Paid'])
display(type_paid_crosstab)

type_paid_crosstab.plot(kind='bar', stacked=True, colormap='coolwarm')
plt.title('Stacked Bar Plot of Post Type vs Paid Status')
plt.xlabel('Type of Post')
plt.ylabel('Count')
plt.legend(title='Paid', loc='upper right')
plt.show()

print("\nCross-tabulation of 'Category' and 'Paid' columns:")
category_paid_crosstab = pd.crosstab(data['Category'], data['Paid'])
display(category_paid_crosstab)

category_paid_crosstab.plot(kind='bar', stacked=True, colormap='viridis')
plt.title('Stacked Bar Plot of Post Category vs Paid Status')
plt.xlabel('Category of Post')
plt.ylabel('Count')
plt.legend(title='Paid', loc='upper right')
plt.show()

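# Engagement metrics ('like', 'comment', 'share') sliced by each categorical
# field, first as box plots and then as violin plots.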
numerical_metrics = ['like', 'comment', 'share']

for metric in numerical_metrics:
    plt.figure(figsize=(18, 6))
    plt.subplot(1, 3, 1)
    sns.boxplot(x='Type', y=metric, data=data, palette='Set3')
    plt.title(f'Distribution of {metric} by Post Type')

    plt.subplot(1, 3, 2)
    sns.boxplot(x='Category', y=metric, data=data, palette='Set2')
    plt.title(f'Distribution of {metric} by Post Category')

    plt.subplot(1, 3, 3)
    sns.boxplot(x='Paid', y=metric, data=data, palette='Set1')
    plt.title(f'Distribution of {metric} by Paid Status')

    plt.tight_layout()
    plt.show()

for metric in numerical_metrics:
    plt.figure(figsize=(18, 6))
    plt.subplot(1, 3, 1)
    sns.violinplot(x='Type', y=metric, data=data, palette='coolwarm', inner='quartile')
    plt.title(f'Violin Plot of {metric} by Post Type')

    plt.subplot(1, 3, 2)
    sns.violinplot(x='Category', y=metric, data=data, palette='viridis', inner='quartile')
    plt.title(f'Violin Plot of {metric} by Post Category')

    plt.subplot(1, 3, 3)
    sns.violinplot(x='Paid', y=metric, data=data, palette='magma', inner='quartile')
    plt.title(f'Violin Plot of {metric} by Paid Status')

    plt.tight_layout()
    plt.show()

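# Chi-square tests of independence between pairs of categorical variables.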
from scipy.stats import chi2_contingency

categorical_pairs = [('Type', 'Paid'), ('Category', 'Paid'), ('Type', 'Category')]
print("\nChi-Square Test for Independence between Categorical Variables:")
for pair in categorical_pairs:
    contingency_table = pd.crosstab(data[pair[0]], data[pair[1]])
    chi2, p, dof, expected = chi2_contingency(contingency_table)

    print(f"Chi-Square Test between {pair[0]} and {pair[1]}:")
    print(f"Chi2 = {chi2:.2f}, p-value = {p:.3f}")
    if p < 0.05:
        print(f"There is a significant association between {pair[0]} and {pair[1]}.\n")
    else:
        print(f"No significant association between {pair[0]} and {pair[1]}.\n")