path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses 1) |
---|---|---|---|
129022563/cell_46 | [
"text_plain_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
STRATEGY = 'median'
train.shape
train.count()
train.isna().sum()
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
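# Everything except the 'Transported' target is treated as a model feature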
TARGET = 'Transported'
FEATURES = [col for col in train.columns if col != TARGET]
RANDOM_STATE = 12
test.isna().sum()
test_null = pd.DataFrame(test.isna().sum())
test_null = test_null.sort_values(by=0, ascending=False)
train_null = pd.DataFrame(train.isna().sum())
train_null = train_null.sort_values(by=0, ascending=False)[:-1]
fig = make_subplots(rows = 1,
cols = 2,
column_titles = ["Train Data","Test Data"],
x_title = "Missing Values")
train_null.index
fig.add_trace(go.Bar(x=train_null[0], y=train_null.index, orientation='h'), 1, 1)
fig.add_trace(go.Bar(x=test_null[0], y=test_null.index, orientation='h'), 1, 2)
fig.update_layout(showlegend=False, title_text='Column wise Null Value Distribution', title_x=0.5)
fig.show() | code |
129022563/cell_53 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
STRATEGY = 'median'
train.shape
train.count()
train.isna().sum()
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = [col for col in train.columns if col != TARGET]
RANDOM_STATE = 12
train.iloc[:, :-1].describe().T.sort_values(by='std', ascending=False)
test.isna().sum()
test_null = pd.DataFrame(test.isna().sum())
train_null = pd.DataFrame(train.isna().sum())
train_null = train_null.sort_values(by=0, ascending=False)[:-1]
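# Count missing values per row, then express each count as a fraction of all training rows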
missing_train_row = train.isna().sum(axis=1)
missing_train_row = pd.DataFrame(missing_train_row.value_counts() / train.shape[0]).reset_index()
train.isna().sum(axis=1).unique()
train.shape[0]
train.isna().sum(axis=1).value_counts()
train.isna().sum(axis=1) | code |
129022563/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
STRATEGY = 'median'
train.shape
train.count()
train.isna().sum() | code |
129022563/cell_37 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
STRATEGY = 'median'
train.shape
train.count()
train.isna().sum()
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = [col for col in train.columns if col != TARGET]
RANDOM_STATE = 12
test.isna().sum()
test_null = pd.DataFrame(test.isna().sum())
test_null | code |
1007330/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company')
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
g = sns.FacetGrid(data, hue='left', aspect=4)
g.map(sns.kdeplot, 'average_montly_hours', fill=True)
g.set(xlim=(0, data['average_montly_hours'].max()))
g.add_legend() | code |
1007330/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company') | code |
1007330/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
corr_left = pd.DataFrame(corr['left'].drop('left'))
corr_left.sort_values(by='left', ascending=False) | code |
1007330/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company')
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
sns.barplot(x='time_spend_company', y='left', hue='salary', data=data) | code |
1007330/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/HR_comma_sep.csv')
(data['sales'].unique(), data['salary'].unique()) | code |
1007330/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
corr_left = pd.DataFrame(corr['left'].drop('left'))
corr_left.sort_values(by='left', ascending=False)
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
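# Bin avg_hour_project into 3 equal-width ranges and compare the mean attrition ('left') rate per bin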
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data[['avg_hour_project_range', 'left']].groupby(['avg_hour_project_range']).mean() | code |
1007330/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company')
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
sns.barplot(x='time_spend_company', y='left', hue='promotion_last_5years', data=data) | code |
1007330/cell_18 | [
"text_html_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
left = data[data['left'] == 1]
not_left = data[data['left'] == 0]
f, axrrr = plt.subplots(1, 2, sharey=True, sharex=True)
axrrr[0].hist('time_spend_company', data=left, bins=10)
axrrr[0].set_title('Left')
axrrr[0].set_xlabel('Time Spend at the Company')
axrrr[0].set_ylabel('Number of Observations')
axrrr[1].hist('time_spend_company', data=not_left, bins=10)
axrrr[1].set_title('Not Left')
axrrr[1].set_xlabel('time_spend_company')
axrrr[1].set_ylabel('Number of Observations') | code |
1007330/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company')
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
g = sns.FacetGrid(data, hue='left', aspect=4)
g.map(sns.kdeplot, 'average_montly_hours', fill=True)
g.set(xlim=(0, data['average_montly_hours'].max()))
g.add_legend()
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'average_montly_hours')
(np.mean(data[data['left'] == 1]['average_montly_hours']), np.mean(data[data['left'] == 0]['average_montly_hours']))
data['avg_mon_hours_range'] = pd.cut(data['average_montly_hours'], 3)
data.loc[data['average_montly_hours'] <= 167.333, 'average_montly_hours'] = 0
data.loc[(data['average_montly_hours'] > 167.333) & (data['average_montly_hours'] <= 238.667), 'average_montly_hours'] = 1
data.loc[(data['average_montly_hours'] > 238.667) & (data['average_montly_hours'] <= 310.0), 'average_montly_hours'] = 2
data.drop(['avg_mon_hours_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'number_project')
print('left_median : ', np.median(data[data['left'] == 1]['number_project']))
print('not_left_median : ', np.median(data[data['left'] == 0]['number_project'])) | code |
1007330/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values)
plt.title('Heatmap of Correlation Matrix')
corr | code |
1007330/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company')
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
sns.barplot(x='time_spend_company', y='left', data=data)
plt.title('Left over time spend at company (barplot)')
sns.catplot(x='time_spend_company', y='left', data=data, kind='point', height=5)
plt.title('Left over time spend at company (factorplot)') | code |
1007330/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/HR_comma_sep.csv')
data.head() | code |
1007330/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
corr_left = pd.DataFrame(corr['left'].drop('left'))
corr_left.sort_values(by='left', ascending=False)
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
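# Bin avg_hour_project into 3 equal-width ranges and compare the mean attrition ('left') rate per bin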
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data[['avg_hour_project_range', 'left']].groupby(['avg_hour_project_range']).mean()
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
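# Bin average_montly_hours into 3 equal-width ranges and compare the mean attrition ('left') rate per bin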
data['avg_mon_hours_range'] = pd.cut(data['average_montly_hours'], 3)
data[['avg_mon_hours_range', 'left']].groupby(['avg_mon_hours_range']).mean() | code |
1007330/cell_22 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company')
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
g = sns.FacetGrid(data, hue='left', aspect=4)
g.map(sns.kdeplot, 'average_montly_hours', fill=True)
g.set(xlim=(0, data['average_montly_hours'].max()))
g.add_legend()
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'average_montly_hours')
(np.mean(data[data['left'] == 1]['average_montly_hours']), np.mean(data[data['left'] == 0]['average_montly_hours'])) | code |
1007330/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('../input/HR_comma_sep.csv')
corr = data.corr(numeric_only=True)
corr
data['avg_hour_project'] = data['average_montly_hours'] * 12 / data['number_project']
data['avg_hour_project_range'] = pd.cut(data['avg_hour_project'], 3)
data.loc[data['avg_hour_project'] <= 749.333, 'avg_hour_project'] = 0
data.loc[(data['avg_hour_project'] > 749.333) & (data['avg_hour_project'] <= 1304.667), 'avg_hour_project'] = 1
data.loc[(data['avg_hour_project'] > 1304.667) & (data['avg_hour_project'] <= 1860.0), 'avg_hour_project'] = 2
data.drop(['avg_hour_project_range'], axis=1, inplace=True)
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'time_spend_company')
dropdata = data[data['time_spend_company'] >= 8]
data.drop(dropdata.index, inplace=True)
g = sns.FacetGrid(data, hue='left', aspect=4)
g.map(sns.kdeplot, 'average_montly_hours', fill=True)
g.set(xlim=(0, data['average_montly_hours'].max()))
g.add_legend()
g = sns.FacetGrid(data, col='left')
g.map(sns.boxplot, 'average_montly_hours')
(np.mean(data[data['left'] == 1]['average_montly_hours']), np.mean(data[data['left'] == 0]['average_montly_hours']))
data['avg_mon_hours_range'] = pd.cut(data['average_montly_hours'], 3)
data.loc[data['average_montly_hours'] <= 167.333, 'average_montly_hours'] = 0
data.loc[(data['average_montly_hours'] > 167.333) & (data['average_montly_hours'] <= 238.667), 'average_montly_hours'] = 1
data.loc[(data['average_montly_hours'] > 238.667) & (data['average_montly_hours'] <= 310.0), 'average_montly_hours'] = 2
data.drop(['avg_mon_hours_range'], axis=1, inplace=True)
sns.barplot(x='number_project', y='left', data=data)
plt.title('Left over Number of project') | code |
1007330/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/HR_comma_sep.csv')
data.info() | code |
89130759/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
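# Tag each row with its origin ('ind') so train and test can be split apart again after shared preprocessing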
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
test, train = (df_merge[df_merge['ind'].eq('test')].copy(), df_merge[df_merge['ind'].eq('train')].copy())
test.drop(['TARGET', 'ind'], axis=1, inplace=True)
train.drop(['ind'], axis=1, inplace=True)
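from sklearn.model_selection import train_test_split
# Hypothetical reconstruction: the cell that defined X_train/X_valid is missing from
# this extract, so the split below (columns, test_size, random_state) is assumed.
X_train, X_valid, y_train, y_valid = train_test_split(train.drop(['TARGET'], axis=1), train['TARGET'], test_size=0.2, random_state=0)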
from sklearn.feature_selection import VarianceThreshold as VT
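# threshold=0 flags only zero-variance (i.e. constant) columns for removal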
var_thres = VT(threshold=0)
var_thres.fit(X_train)
const_cols = [col for col in X_train.columns if col not in X_train.columns[var_thres.get_support()]]
X_train.drop(columns=const_cols, axis=1, inplace=True)
X_valid.drop(columns=const_cols, axis=1, inplace=True)
test.drop(columns=const_cols, axis=1, inplace=True)
(X_train.shape, X_valid.shape, test.shape)
def correlation(dataset, threshold):
col_corr = set()
corr_matrix = dataset.corr()
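    # scan the lower triangle (j < i) so each feature pair is visited once;
    # keep the later column of any pair whose correlation exceeds the threshold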
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] > threshold:
colname = corr_matrix.columns[i]
col_corr.add(colname)
return col_corr
corr_features = correlation(X_train, 0.85)
X_train.drop(columns=corr_features, axis=1, inplace=True)
X_valid.drop(columns=corr_features, axis=1, inplace=True)
test.drop(columns=corr_features, axis=1, inplace=True)
(X_train.shape, X_valid.shape, test.shape) | code |
89130759/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
test, train = (df_merge[df_merge['ind'].eq('test')].copy(), df_merge[df_merge['ind'].eq('train')].copy())
test.drop(['TARGET', 'ind'], axis=1, inplace=True)
train.drop(['ind'], axis=1, inplace=True) | code |
89130759/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.head() | code |
89130759/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
def get_cols_with_missing_values(DataFrame):
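    # return only the columns that actually contain missing values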
missing_na_columns = DataFrame.isnull().sum()
return missing_na_columns[missing_na_columns > 0]
get_cols_with_missing_values(df_merge) | code |
89130759/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_train.head() | code |
89130759/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
test, train = (df_merge[df_merge['ind'].eq('test')].copy(), df_merge[df_merge['ind'].eq('train')].copy())
test.drop(['TARGET', 'ind'], axis=1, inplace=True)
train.drop(['ind'], axis=1, inplace=True)
test.head() | code |
89130759/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
test, train = (df_merge[df_merge['ind'].eq('test')].copy(), df_merge[df_merge['ind'].eq('train')].copy())
test.drop(['TARGET', 'ind'], axis=1, inplace=True)
train.drop(['ind'], axis=1, inplace=True)
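from sklearn.model_selection import train_test_split
# Hypothetical reconstruction: the cell that defined X_train/X_valid is missing from
# this extract, so the split below (columns, test_size, random_state) is assumed.
X_train, X_valid, y_train, y_valid = train_test_split(train.drop(['TARGET'], axis=1), train['TARGET'], test_size=0.2, random_state=0)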
from sklearn.feature_selection import VarianceThreshold as VT
var_thres = VT(threshold=0)
var_thres.fit(X_train)
const_cols = [col for col in X_train.columns if col not in X_train.columns[var_thres.get_support()]]
X_train.drop(columns=const_cols, axis=1, inplace=True)
X_valid.drop(columns=const_cols, axis=1, inplace=True)
test.drop(columns=const_cols, axis=1, inplace=True)
(X_train.shape, X_valid.shape, test.shape)
def correlation(dataset, threshold):
col_corr = set()
corr_matrix = dataset.corr()
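    # scan the lower triangle (j < i) so each feature pair is visited once;
    # keep the later column of any pair whose correlation exceeds the threshold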
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] > threshold:
colname = corr_matrix.columns[i]
col_corr.add(colname)
return col_corr
corr_features = correlation(X_train, 0.85)
print('Features with high correlation ', corr_features) | code |
89130759/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89130759/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
print(cat_cols) | code |
89130759/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
print(num_cols) | code |
89130759/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.feature_selection import VarianceThreshold as VT
var_thres = VT(threshold=0)
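# NOTE: X_train is assumed to be defined by an earlier train/test split cell that is not included in this extract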
var_thres.fit(X_train)
const_cols = [col for col in X_train.columns if col not in X_train.columns[var_thres.get_support()]]
print(const_cols) | code |
89130759/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
test, train = (df_merge[df_merge['ind'].eq('test')].copy(), df_merge[df_merge['ind'].eq('train')].copy())
test.drop(['TARGET', 'ind'], axis=1, inplace=True)
train.drop(['ind'], axis=1, inplace=True)
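from sklearn.model_selection import train_test_split
# Hypothetical reconstruction: the cell that defined X_train/X_valid is missing from
# this extract, so the split below (columns, test_size, random_state) is assumed.
X_train, X_valid, y_train, y_valid = train_test_split(train.drop(['TARGET'], axis=1), train['TARGET'], test_size=0.2, random_state=0)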
from sklearn.feature_selection import VarianceThreshold as VT
var_thres = VT(threshold=0)
var_thres.fit(X_train)
const_cols = [col for col in X_train.columns if col not in X_train.columns[var_thres.get_support()]]
X_train.drop(columns=const_cols, axis=1, inplace=True)
X_valid.drop(columns=const_cols, axis=1, inplace=True)
test.drop(columns=const_cols, axis=1, inplace=True) | code |
89130759/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_test.head() | code |
89130759/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
test, train = (df_merge[df_merge['ind'].eq('test')].copy(), df_merge[df_merge['ind'].eq('train')].copy())
test.drop(['TARGET', 'ind'], axis=1, inplace=True)
train.drop(['ind'], axis=1, inplace=True)
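from sklearn.model_selection import train_test_split
# Hypothetical reconstruction: the cell that defined X_train/X_valid is missing from
# this extract, so the split below (columns, test_size, random_state) is assumed.
X_train, X_valid, y_train, y_valid = train_test_split(train.drop(['TARGET'], axis=1), train['TARGET'], test_size=0.2, random_state=0)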
from sklearn.feature_selection import VarianceThreshold as VT
var_thres = VT(threshold=0)
var_thres.fit(X_train)
const_cols = [col for col in X_train.columns if col not in X_train.columns[var_thres.get_support()]]
X_train.drop(columns=const_cols, axis=1, inplace=True)
X_valid.drop(columns=const_cols, axis=1, inplace=True)
test.drop(columns=const_cols, axis=1, inplace=True)
(X_train.shape, X_valid.shape, test.shape) | code |
89130759/cell_14 | [
"text_html_output_1.png"
] | from sklearn.feature_selection import VarianceThreshold as VT
var_thres = VT(threshold=0)
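# NOTE: X_train is assumed to be defined by an earlier train/test split cell that is not included in this extract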
var_thres.fit(X_train) | code |
89130759/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape
cat_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype == 'object']
num_cols = [cname for cname in df_merge.columns if df_merge[cname].dtype != 'object']
test, train = (df_merge[df_merge['ind'].eq('test')].copy(), df_merge[df_merge['ind'].eq('train')].copy())
test.drop(['TARGET', 'ind'], axis=1, inplace=True)
train.drop(['ind'], axis=1, inplace=True)
train.head() | code |
89130759/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/santander-customer-satisfaction/train.csv')
df_test = pd.read_csv('/kaggle/input/santander-customer-satisfaction/test.csv')
df_merge = pd.concat([df_test.assign(ind='test'), df_train.assign(ind='train')])
df_merge.shape | code |
105174887/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
train_eda.isnull().sum()
train_final = train_eda.dropna()
train_final.isnull().sum()
train_final.shape
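# Binary-encode Sex by hand: female -> 1, male -> 0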
train_final.loc[train_final.Sex == 'female', 'Sex'] = 1
train_final.loc[train_final.Sex == 'male', 'Sex'] = 0
train_final['Sex'] = train_final['Sex'].astype(float)
train_final['Sex'] | code |
105174887/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
train_eda.info() | code |
105174887/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape | code |
105174887/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.head() | code |
105174887/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
train_eda.isnull().sum()
train_final = train_eda.dropna()
train_final.isnull().sum()
train_final.shape
train_final.loc[train_final.Sex == 'female', 'Sex'] = 1
train_final.loc[train_final.Sex == 'male', 'Sex'] = 0
train_final['Sex'] = train_final['Sex'].astype(float)
train_final['Sex']
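# Ordinal-encode Embarked by hand: S -> 3, C -> 2, Q -> 1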
train_final.loc[train_final.Embarked == 'S', 'Embarked'] = 3
train_final.loc[train_final.Embarked == 'C', 'Embarked'] = 2
train_final.loc[train_final.Embarked == 'Q', 'Embarked'] = 1
train_final['Embarked'] = train_final['Embarked'].astype(float)
train_final['Embarked']
train_final.head() | code |
105174887/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
train_eda.isnull().sum()
train_final = train_eda.dropna()
train_final.isnull().sum()
train_final.shape | code |
105174887/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train['Embarked'] | code |
105174887/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test = test.drop(['Pclass', 'Name', 'Cabin'], axis='columns')
test.head() | code |
105174887/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns | code |
105174887/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
train_eda.isnull().sum()
train_final = train_eda.dropna()
train_final.isnull().sum() | code |
105174887/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105174887/cell_32 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test = test.drop(['Pclass', 'Name', 'Cabin'], axis='columns')
test.isnull().sum()
test.loc[test.Sex == 'female', 'Sex'] = 1
test.loc[test.Sex == 'male', 'Sex'] = 0
test['Sex'] = test['Sex'].astype(float)
test.loc[test.Embarked == 'S', 'Embarked'] = 3
test.loc[test.Embarked == 'C', 'Embarked'] = 2
test.loc[test.Embarked == 'Q', 'Embarked'] = 1
test['Embarked'] = test['Embarked'].astype(float)
test.isnull().sum()
test.head() | code |
105174887/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum() | code |
105174887/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
print(train_eda['Pclass'].mean())
print(train_eda['Pclass'].median()) | code |
105174887/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
train_eda.isnull().sum() | code |
105174887/cell_31 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test = test.drop(['Pclass', 'Name', 'Cabin'], axis='columns')
test.isnull().sum()
test.loc[test.Sex == 'female', 'Sex'] = 1
test.loc[test.Sex == 'male', 'Sex'] = 0
test['Sex'] = test['Sex'].astype(float)
test.loc[test.Embarked == 'S', 'Embarked'] = 3
test.loc[test.Embarked == 'C', 'Embarked'] = 2
test.loc[test.Embarked == 'Q', 'Embarked'] = 1
test['Embarked'] = test['Embarked'].astype(float)
test.isnull().sum() | code |
105174887/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
sns.boxplot(x=train_eda['Pclass']) | code |
105174887/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
train_eda.isnull().sum()
train_final = train_eda.dropna()
train_final.isnull().sum()
train_final.shape
train_final.loc[train_final.Sex == 'female', 'Sex'] = 1
train_final.loc[train_final.Sex == 'male', 'Sex'] = 0
train_final['Sex'] = train_final['Sex'].astype(float)
train_final['Sex']
train_final.loc[train_final.Embarked == 'S', 'Embarked'] = 3
train_final.loc[train_final.Embarked == 'C', 'Embarked'] = 2
train_final.loc[train_final.Embarked == 'Q', 'Embarked'] = 1
train_final['Embarked'] = train_final['Embarked'].astype(float)
train_final['Embarked'] | code |
105174887/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.describe() | code |
105174887/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test = test.drop(['Pclass', 'Name', 'Cabin'], axis='columns')
test.isnull().sum() | code |
105174887/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train_eda = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket'], axis='columns')
train_eda.isnull().sum()
train_eda.shape
train_eda.columns
plt.figure(figsize=(10, 7))
sns.heatmap(data=train_eda.corr(numeric_only=True), annot=True, cmap='YlGnBu') | code |
105174887/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.info() | code |
90157584/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
train.head() | code |
90157584/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.heatmap(test.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_25 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
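# NOTE: a bare DataFrame.fillna(scalar) fills NaNs in every column (including Age), not only Embarked/Fare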
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.head() | code |
90157584/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
sns.heatmap(test.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
def impute_age(cols):
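    # when Age is missing, substitute a class-typical age (37/29/24 approximate the per-class median ages)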
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age
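# In the full notebook this is presumably applied as, e.g., train['Age'] = train[['Age', 'Pclass']].apply(impute_age, axis=1)
# (hypothetical reconstruction; the applying cell is not in this extract)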
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
pd.get_dummies(train['Embarked'], drop_first=True).head() | code |
90157584/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
test.drop('Cabin', axis=1, inplace=True)
test.head() | code |
90157584/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
test.info() | code |
90157584/cell_26 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
train.info() | code |
90157584/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
sns.heatmap(test.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
def impute_age(cols):
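    # when Age is missing, substitute a class-typical age (37/29/24 approximate the per-class median ages)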
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age
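# In the full notebook this is presumably applied as, e.g., train['Age'] = train[['Age', 'Pclass']].apply(impute_age, axis=1)
# (hypothetical reconstruction; the applying cell is not in this extract)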
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
pd.get_dummies(test['Embarked'], drop_first=True).head() | code |
90157584/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='viridis') | code |
90157584/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
sns.countplot(x='Survived', hue='Pclass', data=train, palette='rainbow') | code |
90157584/cell_37 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
train.drop(['Sex', 'Embarked', 'Name', 'Ticket'], axis=1, inplace=True)
train.head() | code |
90157584/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
plt.figure(figsize=(12, 7))
sns.boxplot(x='Pclass', y='Age', data=train, palette='winter') | code |
90157584/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.head() | code |
90157584/cell_36 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.set_style('whitegrid')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train = train.fillna(train['Embarked'].value_counts().index[0])
test = test.fillna(test['Fare'].value_counts().index[0])
test.drop(['Sex', 'Embarked', 'Name', 'Ticket'], axis=1, inplace=True)
test.head() | code |
32067553/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
def add_daily_measures(df):
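    # difference the cumulative ConfirmedCases/Fatalities series into daily counts; day 0 is zeroed out at the end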
df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases']
df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities']
for i in range(1, len(df)):
df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
df.loc[0, 'Daily Cases'] = 0
df.loc[0, 'Daily Deaths'] = 0
return df
df_world = data_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_usa = data_train.query("Country_Region=='US'")
df_usa = df_usa.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_usa = add_daily_measures(df_usa)
df_italy = data_train.query("Country_Region=='Italy'")
df_italy = df_italy.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_italy = add_daily_measures(df_italy)
df_spain = data_train.query("Country_Region=='Spain'")
df_spain = df_spain.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_spain = add_daily_measures(df_spain)
df_korea = data_train.query("Country_Region=='Korea, South'")
df_korea = df_korea.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_korea = add_daily_measures(df_korea)
df_usa.plot(title='USA', y=['Daily Cases', 'Daily Deaths'], x='Date', figsize=(12, 6))
df_italy.plot(title='Italy', y=['Daily Cases', 'Daily Deaths'], x='Date', figsize=(12, 6))
df_spain.plot(title='Spain', y=['Daily Cases', 'Daily Deaths'], x='Date', figsize=(12, 6))
df_korea.plot(title='South Korea', y=['Daily Cases', 'Daily Deaths'], x='Date', figsize=(12, 6)) | code |
32067553/cell_23 | [
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
def add_daily_measures(df):
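    # difference the cumulative ConfirmedCases/Fatalities series into daily counts; day 0 is zeroed out at the end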
df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases']
df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities']
for i in range(1, len(df)):
df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
df.loc[0, 'Daily Cases'] = 0
df.loc[0, 'Daily Deaths'] = 0
return df
df_world = data_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_usa = data_train.query("Country_Region=='US'")
df_usa = df_usa.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_usa = add_daily_measures(df_usa)
df_italy = data_train.query("Country_Region=='Italy'")
df_italy = df_italy.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_italy = add_daily_measures(df_italy)
df_spain = data_train.query("Country_Region=='Spain'")
df_spain = df_spain.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_spain = add_daily_measures(df_spain)
df_korea = data_train.query("Country_Region=='Korea, South'")
df_korea = df_korea.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_korea = add_daily_measures(df_korea)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
df_usa_tested = data_daily_tested.query("Code=='USA'")
df_italy_tested = data_daily_tested.query("Entity=='Italy'")
df_spain_tested = data_daily_tested.query("Entity=='Spain'")
df_korea_tested = data_daily_tested.query("Entity=='South Korea'")
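# pd.merge with no 'on=' joins on all shared column names (here 'Date'), keeping only dates present in both frames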
df_usa_merge = pd.merge(df_usa, df_usa_tested)
df_usa_merge = pd.merge(df_usa_merge, data_flight)
df_usa_data = df_usa_merge.drop(['Date', 'Entity', 'Code', 'US <-> Latin America', 'US <-> China', 'Canada <-> Canada', 'Canada <-> NON Canada', 'Europe <-> Europe', 'Europe <-> UK', 'Europe <-> Latin America', 'UK <-> UK', 'UK <-> NON UK', 'Italy <-> Italy', 'China <-> China', 'Brazil <-> Brazil', 'Brazil <-> NON Brazil', 'India <-> India', 'India <-> NON India', 'Iran <-> Iran'], axis=1)
df_usa_data.head() | code |
32067553/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
df_usa_tested = data_daily_tested.query("Code=='USA'")
df_italy_tested = data_daily_tested.query("Entity=='Italy'")
df_spain_tested = data_daily_tested.query("Entity=='Spain'")
df_korea_tested = data_daily_tested.query("Entity=='South Korea'")
df_usa_tested.plot(title='USA', x='Date', figsize=(12, 6))
df_italy_tested.plot(title='Italy', x='Date', figsize=(12, 6))
df_korea_tested.plot(title='Korea', x='Date', figsize=(12, 6)) | code |
32067553/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
def add_daily_measures(df):
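    # difference the cumulative ConfirmedCases/Fatalities series into daily counts; day 0 is zeroed out at the end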
df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases']
df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities']
for i in range(1, len(df)):
df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
df.loc[0, 'Daily Cases'] = 0
df.loc[0, 'Daily Deaths'] = 0
return df
df_world = data_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_usa = data_train.query("Country_Region=='US'")
df_usa = df_usa.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_usa = add_daily_measures(df_usa)
df_italy = data_train.query("Country_Region=='Italy'")
df_italy = df_italy.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_italy = add_daily_measures(df_italy)
df_spain = data_train.query("Country_Region=='Spain'")
df_spain = df_spain.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_spain = add_daily_measures(df_spain)
df_korea = data_train.query("Country_Region=='Korea, South'")
df_korea = df_korea.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_korea = add_daily_measures(df_korea)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
df_usa_tested = data_daily_tested.query("Code=='USA'")
df_italy_tested = data_daily_tested.query("Entity=='Italy'")
df_spain_tested = data_daily_tested.query("Entity=='Spain'")
df_korea_tested = data_daily_tested.query("Entity=='South Korea'")
df_usa_merge = pd.merge(df_usa, df_usa_tested)
df_usa_merge = pd.merge(df_usa_merge, data_flight)
df_usa_data = df_usa_merge.drop(['Date', 'Entity', 'Code', 'US <-> Latin America', 'US <-> China', 'Canada <-> Canada', 'Canada <-> NON Canada', 'Europe <-> Europe', 'Europe <-> UK', 'Europe <-> Latin America', 'UK <-> UK', 'UK <-> NON UK', 'Italy <-> Italy', 'China <-> China', 'Brazil <-> Brazil', 'Brazil <-> NON Brazil', 'India <-> India', 'India <-> NON India', 'Iran <-> Iran'], axis=1)
quant_features = ['Daily Cases', 'Daily change in cumulative total tests', 'US <-> US', 'US <-> NON US', 'US <-> Europe', 'ConfirmedCases']
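# Standardize each quantitative feature to zero mean / unit variance, keeping (mean, std) so scaled values can be inverted later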
scaled_features = {}
for each in quant_features:
mean, std = (df_usa_data[each].mean(), df_usa_data[each].std())
scaled_features[each] = [mean, std]
df_usa_data.loc[:, each] = (df_usa_data[each] - mean) / std
df_usa_data.head() | code |
32067553/cell_11 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
def add_daily_measures(df):
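    # difference the cumulative ConfirmedCases/Fatalities series into daily counts; day 0 is zeroed out at the end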
df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases']
df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities']
for i in range(1, len(df)):
df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
df.loc[0, 'Daily Cases'] = 0
df.loc[0, 'Daily Deaths'] = 0
return df
df_world = data_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_world.plot(title='Covid19 World daily status', y=['Daily Cases', 'Daily Deaths'], x='Date', figsize=(12, 6)) | code |
32067553/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
data_daily_tested.plot(x='Date', figsize=(12, 6)) | code |
32067553/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32067553/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
confirmed_total_date_Italy = data_train[data_train['Country_Region'] == 'Italy'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Italy = data_train[data_train['Country_Region'] == 'Italy'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Italy = confirmed_total_date_Italy.join(fatalities_total_date_Italy)
plt.figure(figsize=(17, 10))
plt.subplot(2, 2, 1)
total_date_Italy.plot(ax=plt.gca(), title='Italy')
plt.ylabel('Confirmed infection cases', size=13)
confirmed_total_date_US = data_train[data_train['Country_Region'] == 'US'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_US = data_train[data_train['Country_Region'] == 'US'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_US = confirmed_total_date_US.join(fatalities_total_date_US)
plt.subplot(2, 2, 2)
total_date_US.plot(ax=plt.gca(), title='US')
plt.ylabel('Confirmed infection cases', size=13)
confirmed_total_date_Spain = data_train[data_train['Country_Region'] == 'Spain'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Spain = data_train[data_train['Country_Region'] == 'Spain'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Spain = confirmed_total_date_Spain.join(fatalities_total_date_Spain)
plt.subplot(2, 2, 3)
total_date_Spain.plot(ax=plt.gca(), title='Spain')
plt.ylabel('Confirmed infection cases', size=13)
confirmed_total_date_Korea = data_train[data_train['Country_Region'] == 'Korea, South'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Korea = data_train[data_train['Country_Region'] == 'Korea, South'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Korea = confirmed_total_date_Korea.join(fatalities_total_date_Korea)
plt.subplot(2, 2, 4)
total_date_Korea.plot(ax=plt.gca(), title='Korea, South')
plt.ylabel('Confirmed infection cases', size=13)
plt.figure(figsize=(17, 10))
plt.subplot(2, 2, 1)
plt.plot(confirmed_total_date_Italy)
plt.plot(confirmed_total_date_US)
plt.plot(confirmed_total_date_Spain)
plt.plot(confirmed_total_date_Korea)
plt.legend(['Italy', 'US', 'Spain', 'Korea, South'], loc='upper left')
plt.title('COVID-19 infections from the first confirmed case', size=15)
plt.xlabel('Days', size=13)
plt.ylabel('Infected cases', size=13)
plt.ylim(0, 180000)
plt.subplot(2, 2, 2)
plt.plot(fatalities_total_date_Italy)
plt.plot(fatalities_total_date_US)
plt.plot(fatalities_total_date_Spain)
plt.plot(fatalities_total_date_Korea)
plt.legend(['Italy', 'US', 'Spain', 'Korea, South'], loc='upper left')
plt.title('COVID-19 Fatalities', size=15)
plt.xlabel('Days', size=13)
plt.ylabel('Infected cases', size=13)
plt.ylim(0, 23000)
plt.show() | code |
32067553/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
data_daily_tested.head() | code |
32067553/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_flight.head() | code |
32067553/cell_16 | [
"image_output_4.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_flight.plot(x='Date', figsize=(12, 6)) | code |
32067553/cell_38 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
confirmed_total_date_Italy = data_train[data_train['Country_Region'] == 'Italy'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Italy = data_train[data_train['Country_Region'] == 'Italy'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Italy = confirmed_total_date_Italy.join(fatalities_total_date_Italy)
confirmed_total_date_US = data_train[data_train['Country_Region'] == 'US'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_US = data_train[data_train['Country_Region'] == 'US'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_US = confirmed_total_date_US.join(fatalities_total_date_US)
confirmed_total_date_Spain = data_train[data_train['Country_Region'] == 'Spain'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Spain = data_train[data_train['Country_Region'] == 'Spain'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Spain = confirmed_total_date_Spain.join(fatalities_total_date_Spain)
confirmed_total_date_Korea = data_train[data_train['Country_Region'] == 'Korea, South'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Korea = data_train[data_train['Country_Region'] == 'Korea, South'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Korea = confirmed_total_date_Korea.join(fatalities_total_date_Korea)
plt.ylim(0, 180000)
plt.ylim(0, 23000)
def add_daily_measures(df):
df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases']
df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities']
for i in range(1, len(df)):
df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
df.loc[0, 'Daily Cases'] = 0
df.loc[0, 'Daily Deaths'] = 0
return df
df_world = data_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_usa = data_train.query("Country_Region=='US'")
df_usa = df_usa.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_usa = add_daily_measures(df_usa)
df_italy = data_train.query("Country_Region=='Italy'")
df_italy = df_italy.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_italy = add_daily_measures(df_italy)
df_spain = data_train.query("Country_Region=='Spain'")
df_spain = df_spain.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_spain = add_daily_measures(df_spain)
df_korea = data_train.query("Country_Region=='Korea, South'")
df_korea = df_korea.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_korea = add_daily_measures(df_korea)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
df_usa_tested = data_daily_tested.query("Code=='USA'")
df_italy_tested = data_daily_tested.query("Entity=='Italy'")
df_spain_tested = data_daily_tested.query("Entity=='Spain'")
df_korea_tested = data_daily_tested.query("Entity=='South Korea'")
df_usa_merge = pd.merge(df_usa, df_usa_tested)
df_usa_merge = pd.merge(df_usa_merge, data_flight)
df_usa_data = df_usa_merge.drop(['Date', 'Entity', 'Code', 'US <-> Latin America', 'US <-> China', 'Canada <-> Canada', 'Canada <-> NON Canada', 'Europe <-> Europe', 'Europe <-> UK', 'Europe <-> Latin America', 'UK <-> UK', 'UK <-> NON UK', 'Italy <-> Italy', 'China <-> China', 'Brazil <-> Brazil', 'Brazil <-> NON Brazil', 'India <-> India', 'India <-> NON India', 'Iran <-> Iran'], axis=1)
quant_features = ['Daily Cases', 'Daily change in cumulative total tests', 'US <-> US', 'US <-> NON US', 'US <-> Europe', 'ConfirmedCases']
scaled_features = {}
for each in quant_features:
mean, std = (df_usa_data[each].mean(), df_usa_data[each].std())
scaled_features[each] = [mean, std]
df_usa_data.loc[:, each] = (df_usa_data[each] - mean) / std
test_data = df_usa_data[-15:]
data = df_usa_data[:-15]
target_fields = ['Daily Cases', 'Daily change in cumulative total tests', 'ConfirmedCases']
features, targets = (data.drop(target_fields, axis=1), data[target_fields])
test_features, test_targets = (test_data.drop(target_fields, axis=1), test_data[target_fields])
# Hold out the last 10 rows of the training window for validation. The split size is an
# assumption: the original cell uses train_*/val_* variables without defining them.
train_features, train_targets = (features[:-10], targets[:-10])
val_features, val_targets = (features[-10:], targets[-10:])
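# A from-scratch feedforward network: one sigmoid hidden layer, a linear output unit
# for regression, and hand-written backpropagation with gradients averaged per batch.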
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** (-0.5), (self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** (-0.5), (self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
self.activation_function = lambda x: 1 / (1 + np.exp(-x))
def train(self, features, targets):
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
final_outputs, hidden_outputs = self.forward_pass_train(X)
delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o)
self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)
def forward_pass_train(self, X):
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return (final_outputs, hidden_outputs)
def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
error = y - final_outputs
hidden_error = np.dot(self.weights_hidden_to_output, error)
output_error_term = error * 1
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
delta_weights_i_h += hidden_error_term * X[:, None]
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
return (delta_weights_i_h, delta_weights_h_o)
def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records
def run(self, features):
hidden_inputs = np.dot(features, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y - Y) ** 2)
iterations = 1000
learning_rate = 0.3
hidden_nodes = 7
output_nodes = 1
import sys
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train': [], 'validation': []}
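# Minibatch training: each iteration samples 128 rows with replacement and records
# train/validation MSE so the learning curves can be inspected afterwards.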
for ii in range(iterations):
batch = np.random.choice(train_features.index, size=128)
X, y = (train_features.iloc[batch].values, train_targets.iloc[batch]['Daily Cases'])
network.train(X, y)
train_loss = MSE(network.run(train_features).T, train_targets['Daily Cases'].values)
val_loss = MSE(network.run(val_features).T, val_targets['Daily Cases'].values)
sys.stdout.write('\rProgress: {:2.1f}'.format(100 * ii / float(iterations)) + '% ... Training loss: ' + str(train_loss)[:5] + ' ... Validation loss: ' + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(16, 6))
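# Invert the earlier standardization so predictions are plotted in actual daily-case counts.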
mean, std = scaled_features['Daily Cases']
predictions = network.run(test_features).T * std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['Daily Cases'] * std + mean).values, label='Daily Cases')
ax.set_xlim(right=len(predictions[0]))
ax.legend()
dates = pd.to_datetime(df_usa_merge.iloc[test_data.index]['Date'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates)))
_ = ax.set_xticklabels(dates, rotation=45) | code |
32067553/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
def add_daily_measures(df):
df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases']
df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities']
for i in range(1, len(df)):
df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
df.loc[0, 'Daily Cases'] = 0
df.loc[0, 'Daily Deaths'] = 0
return df
df_world = data_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_usa = data_train.query("Country_Region=='US'")
df_usa = df_usa.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_usa = add_daily_measures(df_usa)
df_italy = data_train.query("Country_Region=='Italy'")
df_italy = df_italy.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_italy = add_daily_measures(df_italy)
df_spain = data_train.query("Country_Region=='Spain'")
df_spain = df_spain.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_spain = add_daily_measures(df_spain)
df_korea = data_train.query("Country_Region=='Korea, South'")
df_korea = df_korea.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_korea = add_daily_measures(df_korea)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
df_usa_tested = data_daily_tested.query("Code=='USA'")
df_italy_tested = data_daily_tested.query("Entity=='Italy'")
df_spain_tested = data_daily_tested.query("Entity=='Spain'")
df_korea_tested = data_daily_tested.query("Entity=='South Korea'")
df_usa_merge = pd.merge(df_usa, df_usa_tested)
df_usa_merge = pd.merge(df_usa_merge, data_flight)
df_usa_data = df_usa_merge.drop(['Date', 'Entity', 'Code', 'US <-> Latin America', 'US <-> China', 'Canada <-> Canada', 'Canada <-> NON Canada', 'Europe <-> Europe', 'Europe <-> UK', 'Europe <-> Latin America', 'UK <-> UK', 'UK <-> NON UK', 'Italy <-> Italy', 'China <-> China', 'Brazil <-> Brazil', 'Brazil <-> NON Brazil', 'India <-> India', 'India <-> NON India', 'Iran <-> Iran'], axis=1)
df_usa_data.plot(figsize=(12, 6)) | code |
32067553/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
def add_daily_measures(df):
df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases']
df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities']
for i in range(1, len(df)):
df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
df.loc[0, 'Daily Cases'] = 0
df.loc[0, 'Daily Deaths'] = 0
return df
df_world = data_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_usa = data_train.query("Country_Region=='US'")
df_usa = df_usa.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_usa = add_daily_measures(df_usa)
df_italy = data_train.query("Country_Region=='Italy'")
df_italy = df_italy.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_italy = add_daily_measures(df_italy)
df_spain = data_train.query("Country_Region=='Spain'")
df_spain = df_spain.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_spain = add_daily_measures(df_spain)
df_korea = data_train.query("Country_Region=='Korea, South'")
df_korea = df_korea.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_korea = add_daily_measures(df_korea)
data_flight = pd.read_csv('/kaggle/input/covid19/covid19_flight_countries_mod.csv')
data_daily_tested = pd.read_csv('/kaggle/input/covid19/full-list-covid-19-tests-per-day.csv')
df_usa_tested = data_daily_tested.query("Code=='USA'")
df_italy_tested = data_daily_tested.query("Entity=='Italy'")
df_spain_tested = data_daily_tested.query("Entity=='Spain'")
df_korea_tested = data_daily_tested.query("Entity=='South Korea'")
df_usa_merge = pd.merge(df_usa, df_usa_tested)
df_usa_merge = pd.merge(df_usa_merge, data_flight)
df_usa_merge.head() | code |
32067553/cell_37 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
confirmed_total_date_Italy = data_train[data_train['Country_Region'] == 'Italy'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Italy = data_train[data_train['Country_Region'] == 'Italy'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Italy = confirmed_total_date_Italy.join(fatalities_total_date_Italy)
confirmed_total_date_US = data_train[data_train['Country_Region'] == 'US'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_US = data_train[data_train['Country_Region'] == 'US'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_US = confirmed_total_date_US.join(fatalities_total_date_US)
confirmed_total_date_Spain = data_train[data_train['Country_Region'] == 'Spain'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Spain = data_train[data_train['Country_Region'] == 'Spain'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Spain = confirmed_total_date_Spain.join(fatalities_total_date_Spain)
confirmed_total_date_Korea = data_train[data_train['Country_Region'] == 'Korea, South'].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
fatalities_total_date_Korea = data_train[data_train['Country_Region'] == 'Korea, South'].groupby(['Date']).agg({'Fatalities': ['sum']})
total_date_Korea = confirmed_total_date_Korea.join(fatalities_total_date_Korea)
plt.ylim(0, 180000)
plt.ylim(0, 23000)
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** (-0.5), (self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** (-0.5), (self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
self.activation_function = lambda x: 1 / (1 + np.exp(-x))
def train(self, features, targets):
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
final_outputs, hidden_outputs = self.forward_pass_train(X)
delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o)
self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)
def forward_pass_train(self, X):
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return (final_outputs, hidden_outputs)
def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
error = y - final_outputs
hidden_error = np.dot(self.weights_hidden_to_output, error)
output_error_term = error * 1
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
delta_weights_i_h += hidden_error_term * X[:, None]
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
return (delta_weights_i_h, delta_weights_h_o)
def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records
def run(self, features):
hidden_inputs = np.dot(features, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y - Y) ** 2)
iterations = 1000
learning_rate = 0.3
hidden_nodes = 7
output_nodes = 1
import sys
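# NOTE: train_features, train_targets, val_features and val_targets are assumed to come
# from an earlier train/validation split cell; they are not defined in this cell.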
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train': [], 'validation': []}
for ii in range(iterations):
batch = np.random.choice(train_features.index, size=128)
X, y = (train_features.iloc[batch].values, train_targets.iloc[batch]['Daily Cases'])
network.train(X, y)
train_loss = MSE(network.run(train_features).T, train_targets['Daily Cases'].values)
val_loss = MSE(network.run(val_features).T, val_targets['Daily Cases'].values)
sys.stdout.write('\rProgress: {:2.1f}'.format(100 * ii / float(iterations)) + '% ... Training loss: ' + str(train_loss)[:5] + ' ... Validation loss: ' + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim() | code |
32067553/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_path = '/kaggle/input/covid19-global-forecasting-week-4/train.csv'
data_train = pd.read_csv(data_path)
display(data_train.head())
display(data_train.describe())
data_train.info()
32067553/cell_36 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import sys
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** (-0.5), (self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** (-0.5), (self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
self.activation_function = lambda x: 1 / (1 + np.exp(-x))
def train(self, features, targets):
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
final_outputs, hidden_outputs = self.forward_pass_train(X)
delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o)
self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)
def forward_pass_train(self, X):
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return (final_outputs, hidden_outputs)
def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
error = y - final_outputs
hidden_error = np.dot(self.weights_hidden_to_output, error)
output_error_term = error * 1
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
delta_weights_i_h += hidden_error_term * X[:, None]
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
return (delta_weights_i_h, delta_weights_h_o)
def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records
def run(self, features):
hidden_inputs = np.dot(features, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y - Y) ** 2)
iterations = 1000
learning_rate = 0.3
hidden_nodes = 7
output_nodes = 1
import sys
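# NOTE: as above, the train_*/val_* feature and target frames are assumed to be
# defined by an earlier split cell and are not created here.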
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train': [], 'validation': []}
for ii in range(iterations):
batch = np.random.choice(train_features.index, size=128)
X, y = (train_features.iloc[batch].values, train_targets.iloc[batch]['Daily Cases'])
network.train(X, y)
train_loss = MSE(network.run(train_features).T, train_targets['Daily Cases'].values)
val_loss = MSE(network.run(val_features).T, val_targets['Daily Cases'].values)
sys.stdout.write('\rProgress: {:2.1f}'.format(100 * ii / float(iterations)) + '% ... Training loss: ' + str(train_loss)[:5] + ' ... Validation loss: ' + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss) | code |
32068790/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
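# Day-of-year cut-offs for the validation, public-LB and private-LB windows; the
# +7+7 offsets appear to shift earlier-week constants forward to the week-4 timeline.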
day_before_valid = 71 + 7 + 7
day_before_public = 78 + 7 + 7
day_before_private = df_traintest['day'][pd.isna(df_traintest['ForecastId'])].max()
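# Build a unique place identifier as "Country/Province", falling back to the country
# alone when Province_State is NaN (string + NaN raises, which the except catches).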
def func(x):
    try:
        x_new = x['Country_Region'] + '/' + x['Province_State']
    except TypeError:
        x_new = x['Country_Region']
    return x_new
df_traintest['place_id'] = df_traintest.apply(lambda x: func(x), axis=1)
df_latlong = pd.read_csv('../input/smokingstats/df_Latlong.csv')
def func(x):
    try:
        x_new = x['Country/Region'] + '/' + x['Province/State']
    except TypeError:
        x_new = x['Country/Region']
    return x_new
df_latlong['place_id'] = df_latlong.apply(lambda x: func(x), axis=1)
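# Drop duplicated place_ids so the left merge below attaches exactly one (Lat, Long) pair per place.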
df_latlong = df_latlong[df_latlong['place_id'].duplicated() == False]
df_traintest = pd.merge(df_traintest, df_latlong[['place_id', 'Lat', 'Long']], on='place_id', how='left')
df_traintest.head() | code |
32068790/cell_9 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
df_traintest['Date'] = pd.to_datetime(df_traintest['Date'])
df_traintest['day'] = df_traintest['Date'].apply(lambda x: x.dayofyear).astype(np.int16)
def func(x):
try:
x_new = x['Country_Region'] + '/' + x['Province_State']
except:
x_new = x['Country_Region']
return x_new
df_traintest['place_id'] = df_traintest.apply(lambda x: func(x), axis=1)
df_traintest.head() | code |
32068790/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_train.tail() | code |
32068790/cell_6 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import os, gc, pickle, copy, datetime, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn import metrics
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
df_traintest = pd.concat([df_train, df_test])
print(df_train.shape, df_test.shape, df_traintest.shape) | code |