Dataset preview schema:

| column           | type                | values                    |
|------------------|---------------------|---------------------------|
| path             | string              | length 13 to 17           |
| screenshot_names | sequence of strings | length 1 to 873           |
| code             | string              | length 0 to 40.4k         |
| cell_type        | string class        | 1 distinct value ('code') |
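If this preview corresponds to a published Hugging Face dataset of Kaggle notebook cells, a row can be loaded and inspected roughly as sketched below. This is a minimal sketch assuming only the four columns above; the dataset id `user/kaggle-notebook-cells` is a hypothetical placeholder, not the real repository name.

```python
# Minimal sketch, assuming the schema above maps onto a Hugging Face dataset.
# The dataset id is a hypothetical placeholder, not a real repository name.
from datasets import load_dataset

ds = load_dataset('user/kaggle-notebook-cells', split='train')  # hypothetical id
row = ds[0]
print(row['path'])              # '<kernel id>/<cell id>', e.g. '89139379/cell_10'
print(row['screenshot_names'])  # names of the cell's rendered output images
print(row['cell_type'])         # always 'code' (the single distinct value)
print(row['code'][:200])        # start of the cell's raw source
```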
89139379/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
code
89139379/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
code
74042979/cell_21
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df_test.shape
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)

# Correlation heatmap of the numeric features
fig, ax = plt.subplots(figsize=(16, 8))
ax = sns.heatmap(df.corr(), cmap='bwr', linewidths=0.5)
df.corr()
sns.set(font_scale=1.5)
plt.tight_layout()

# Baseline linear regression on the two strongest predictors
model = LinearRegression()
target = df.loc[:, 'SalePrice']
features = df.loc[:, ['OverallQual', 'GrLivArea']]
model.fit(features, target)
cv_results = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='neg_root_mean_squared_error')
RMSE_train = np.round(cv_results.mean(), 0)
features_test = df_test.loc[:, ['OverallQual', 'GrLivArea']]
fc = model.predict(features_test)

# Scatter index: RMSE relative to the mean sale price
si_train = RMSE_train / df.loc[:, 'SalePrice'].mean()
r2_train = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='r2').mean()
print('scatter index training', np.round(si_train, 2))
print('r2 cross validation training', np.round(r2_train, 2))
code
74042979/cell_9
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.info()
code
74042979/cell_23
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df_test.shape
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)
fig, ax = plt.subplots(figsize=(16, 8))
ax = sns.heatmap(df.corr(), cmap='bwr', linewidths=0.5)
df.corr()
sns.set(font_scale=1.5)
plt.tight_layout()

# First model: two features
model = LinearRegression()
target = df.loc[:, 'SalePrice']
features = df.loc[:, ['OverallQual', 'GrLivArea']]
model.fit(features, target)
cv_results = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='neg_root_mean_squared_error')
RMSE_train = np.round(cv_results.mean(), 0)
features_test = df_test.loc[:, ['OverallQual', 'GrLivArea']]
fc = model.predict(features_test)
si_train = RMSE_train / df.loc[:, 'SalePrice'].mean()
r2_train = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='r2').mean()

# Write the two-feature forecast as a submission file
df_forecast = pd.DataFrame(fc, columns=['SalePrice'])
df_forecast['Id'] = df_test.loc[:, 'Id']
df_forecast.to_csv('fc_linear_1.csv', index=False)
df_forecast

# Second model: add the two year features
model = LinearRegression()
target = df.loc[:, 'SalePrice']
features = df.loc[:, ['OverallQual', 'GrLivArea', 'YearBuilt', 'YearRemodAdd']]
model.fit(features, target)
cv_results = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='neg_root_mean_squared_error')
RMSE_train = np.round(cv_results.mean(), 0)
print('mean of the cross validation neg RMSE', RMSE_train)
features_test = df_test.loc[:, ['OverallQual', 'GrLivArea', 'YearBuilt', 'YearRemodAdd']]
fc = model.predict(features_test)
si_train = RMSE_train / df.loc[:, 'SalePrice'].mean()
r2_train = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='r2').mean()
print('scatter index training', np.round(si_train, 2))
print('r2 cross validation training', np.round(r2_train, 2))
code
74042979/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df_test.shape
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)
fig, ax = plt.subplots(figsize=(16, 8))
ax = sns.heatmap(df.corr(), cmap='bwr', linewidths=0.5)
df.corr()
sns.set(font_scale=1.5)
plt.tight_layout()

# Fit the baseline model and predict on the test set
model = LinearRegression()
target = df.loc[:, 'SalePrice']
features = df.loc[:, ['OverallQual', 'GrLivArea']]
model.fit(features, target)
cv_results = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='neg_root_mean_squared_error')
RMSE_train = np.round(cv_results.mean(), 0)
print('mean of the cross validation neg RMSE', RMSE_train)
features_test = df_test.loc[:, ['OverallQual', 'GrLivArea']]
fc = model.predict(features_test)
code
74042979/cell_6
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
example.shape
code
74042979/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74042979/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)
fig, ax = plt.subplots(figsize=(16, 8))
ax = sns.heatmap(df.corr(), cmap='bwr', linewidths=0.5)
df.corr()
sns.pairplot(df.loc[:, ['OverallQual', 'GrLivArea', 'SalePrice', 'YearBuilt', 'YearRemodAdd']])
df.head()
sns.set(font_scale=1.5)
plt.tight_layout()
plt.show()
code
74042979/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
code
74042979/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)
code
74042979/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)
fig, ax = plt.subplots(figsize=(16, 8))
ax = sns.heatmap(df.corr(), cmap='bwr', linewidths=0.5)
code
74042979/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.head()
code
74042979/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)
fig, ax = plt.subplots(figsize=(16, 8))
ax = sns.heatmap(df.corr(), cmap='bwr', linewidths=0.5)
df.corr()
code
74042979/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.isna().sum()[df.isna().sum() > 0]
sns.histplot(data=df, x='SalePrice')
code
74042979/cell_22
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df_test.shape
df.shape
df.isna().sum()[df.isna().sum() > 0]
df.corr()[['SalePrice']].sort_values(by='SalePrice', ascending=False)
fig, ax = plt.subplots(figsize=(16, 8))
ax = sns.heatmap(df.corr(), cmap='bwr', linewidths=0.5)
df.corr()
sns.set(font_scale=1.5)
plt.tight_layout()

model = LinearRegression()
target = df.loc[:, 'SalePrice']
features = df.loc[:, ['OverallQual', 'GrLivArea']]
model.fit(features, target)
cv_results = cross_val_score(estimator=model, X=features, y=target, cv=10, scoring='neg_root_mean_squared_error')
RMSE_train = np.round(cv_results.mean(), 0)
features_test = df_test.loc[:, ['OverallQual', 'GrLivArea']]
fc = model.predict(features_test)

# Assemble and save the submission file
df_forecast = pd.DataFrame(fc, columns=['SalePrice'])
df_forecast['Id'] = df_test.loc[:, 'Id']
df_forecast.to_csv('fc_linear_1.csv', index=False)
print(df_forecast.shape)
df_forecast
code
74042979/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.describe()
code
74042979/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df.shape
df.isna().sum()[df.isna().sum() > 0]
code
74042979/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
example = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
df_test.shape
code
72065751/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
lin.predict([[40, 35, 2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
lin.predict([[40, 35, 2]])
code
72065751/cell_13
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
lin.predict([[40, 35, 2]])
code
72065751/cell_9
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])

# Charges vs. BMI with the fitted regression line
plt.scatter(df.bmi, df.charges)
plt.xlabel('BMI')
plt.ylabel('Charges')
lin.fit(df[['bmi']], df.charges)
plt.plot(df.bmi, lin.predict(df[['bmi']]), color='red')
lin.predict([[35]])
code
72065751/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
df.describe()
code
72065751/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
df1 = df[df['smoker'] == 'yes']
df1
code
72065751/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
code
72065751/cell_8
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()

# Charges vs. age with the fitted regression line
plt.scatter(df.age, df.charges)
plt.xlabel('Age')
plt.ylabel('Charges')
lin.fit(df[['age']], df.charges)
plt.plot(df.age, lin.predict(df[['age']]), color='red')
lin.predict([[40]])
code
72065751/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation
import seaborn as sns  # Visualization

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
sns.scatterplot(x='bmi', y='charges', data=df, palette='viridis', hue='smoker')
code
72065751/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation
import seaborn as sns  # Visualization

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
sns.scatterplot(x='age', y='charges', data=df, palette='magma', hue='sex')
code
72065751/cell_17
[ "text_plain_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation
import seaborn as sns  # Visualization

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
sns.scatterplot(x='bmi', y='charges', data=df, palette='viridis', hue='sex')
code
72065751/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation
import seaborn as sns  # Visualization

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
sns.scatterplot(x='age', y='charges', data=df, palette='magma', hue='smoker')
code
72065751/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
lin.predict([[40, 35, 2]])
df1 = df[df['smoker'] == 'yes']
df1
lin.fit(df[['age', 'bmi', 'children']], df.charges)
lin.predict([[40, 35, 2]])

# Refit on smokers only and compare the same prediction
lin.fit(df1[['age', 'bmi', 'children']], df1.charges)
lin.predict([[40, 35, 2]])
code
72065751/cell_10
[ "text_html_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])

# Charges vs. number of children with the fitted regression line
plt.scatter(df.children, df.charges)
plt.xlabel('Children')
plt.ylabel('Charges')
lin.fit(df[['children']], df.charges)
plt.plot(df.children, lin.predict(df[['children']]), color='red')
lin.predict([[2]])
code
72065751/cell_12
[ "text_html_output_1.png" ]
from sklearn import linear_model
import matplotlib.pyplot as plt  # Visualization
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
lin = linear_model.LinearRegression()
lin.fit(df[['age']], df.charges)
lin.predict([[40]])
lin.fit(df[['bmi']], df.charges)
lin.predict([[35]])
lin.fit(df[['children']], df.charges)
lin.predict([[2]])
lin.fit(df[['age', 'bmi', 'children']], df.charges)
code
72065751/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # Data manipulation

df = pd.read_csv('../input/insurance/insurance.csv')
df
df.info()
code
73100919/cell_16
[ "text_html_output_1.png" ]
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import numpy as np
import pandas as pd

X = pd.read_csv('../input/30-days-of-ml/train.csv', encoding='utf-8', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', encoding='utf-8', index_col=0)
y = X['target']
X = X.drop(['target'], axis=1)

# Label-encode the categorical columns consistently across train and test
label = LabelEncoder()
categorical_feature = np.where(X.dtypes != 'float64')[0].tolist()
categorical_feature_columns = X.select_dtypes(exclude=['float64']).columns
for column in categorical_feature_columns:
    label.fit(X[column])
    X[column] = label.transform(X[column])
    test[column] = label.transform(test[column])

lgbm_parameters = {
    'metric': 'rmse',
    'n_jobs': -1,
    'n_estimators': 50000,
    'reg_alpha': 10.924491968127692,
    'reg_lambda': 17.396730654687218,
    'colsample_bytree': 0.21497646795452627,
    'subsample': 0.7582562557431147,
    'learning_rate': 0.009985133666265425,
    'max_depth': 18,
    'num_leaves': 63,
    'min_child_samples': 27,
    'max_bin': 523,
    'cat_l2': 0.025083670064082797,
}

# 10-fold CV: average the test predictions over the folds
# (verbose/early_stopping_rounds as fit kwargs require lightgbm < 4.0)
lgbm_val_pred = np.zeros(len(y))
lgbm_test_pred = np.zeros(len(test))
mse = []
kf = KFold(n_splits=10, shuffle=True)
for trn_idx, val_idx in tqdm(kf.split(X, y)):
    x_train_idx = X.iloc[trn_idx]
    y_train_idx = y.iloc[trn_idx]
    x_valid_idx = X.iloc[val_idx]
    y_valid_idx = y.iloc[val_idx]
    lgbm_model = LGBMRegressor(**lgbm_parameters)
    lgbm_model.fit(x_train_idx, y_train_idx, eval_set=(x_valid_idx, y_valid_idx), verbose=-1, early_stopping_rounds=400, categorical_feature=categorical_feature)
    lgbm_test_pred += lgbm_model.predict(test) / 10
    mse.append(mean_squared_error(y_valid_idx, lgbm_model.predict(x_valid_idx)))
code
73100919/cell_3
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from lightgbm import LGBMRegressor
from sklearn.model_selection import KFold
from tqdm import tqdm
from sklearn.metrics import mean_squared_error
import warnings

warnings.filterwarnings('ignore')
code
73100919/cell_5
[ "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_18.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_16.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_8.png", "application_vnd.jupyter.stderr_output_17.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_21.png" ]
import pandas as pd

X = pd.read_csv('../input/30-days-of-ml/train.csv', encoding='utf-8', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', encoding='utf-8', index_col=0)
y = X['target']
X = X.drop(['target'], axis=1)
X.head()
code
2014551/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

# Build the feature stub from the sample-submission ids
sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

# Assemble the historical visit data with holiday and store info
avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
airdata = air
airdata['year'] = air['visit_date'].dt.year
airdata['month'] = air['visit_date'].dt.month
airdata['day'] = air['visit_date'].dt.day
airdata = airdata.drop(['visit_date'], axis=1)
airdata = airdata[airdata.air_store_id.isin(air_selected_store_ids)]
visitors_data = airdata['visitors']
features_data = airdata.drop('visitors', axis=1)

# Min-max scale the numerical features
scaler = MinMaxScaler()
numerical = ['latitude', 'longitude', 'year', 'month', 'day']
features_minmax_transform = pd.DataFrame(data=features_data)
features_minmax_transform[numerical] = scaler.fit_transform(features_data[numerical])
display(features_minmax_transform.head(n=5))
air_selected_store_ids = features_minmax_transform['air_store_id'].unique()
print(len(air_selected_store_ids))
code
2014551/cell_4
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
display(features_to_train.head())
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
print(len(features_to_train.air_store_id.unique()))
print(len(features_to_train.visit_date.unique()))
display(features_to_train.head())
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
print(len(features_to_train))
display(features_to_train.head())
air_selected_store_ids = features_to_train.air_store_id.unique()
print(len(air_selected_store_ids))
code
2014551/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
display(asi.head())
print('Total AIR restaurants: ', len(asi))
print('Total AIR restaurants in ASI', len(asi.air_store_id.value_counts()))
code
2014551/cell_2
[ "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)
print(check_output(['ls', '../input']).decode('utf8'))
code
2014551/cell_11
[ "text_html_output_2.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
airdata = air
airdata['year'] = air['visit_date'].dt.year
airdata['month'] = air['visit_date'].dt.month
airdata['day'] = air['visit_date'].dt.day
airdata = airdata.drop(['visit_date'], axis=1)
print(len(airdata.air_store_id.unique()))
airdata = airdata[airdata.air_store_id.isin(air_selected_store_ids)]
print(len(airdata.air_store_id.unique()))
visitors_data = airdata['visitors']
features_data = airdata.drop('visitors', axis=1)
code
2014551/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
display(air.head(2))
print('Total AIR restaurants count: ', len(air.air_store_id.value_counts()))
code
2014551/cell_15
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
airdata = air
airdata['year'] = air['visit_date'].dt.year
airdata['month'] = air['visit_date'].dt.month
airdata['day'] = air['visit_date'].dt.day
airdata = airdata.drop(['visit_date'], axis=1)
airdata = airdata[airdata.air_store_id.isin(air_selected_store_ids)]
visitors_data = airdata['visitors']
features_data = airdata.drop('visitors', axis=1)

scaler = MinMaxScaler()
numerical = ['latitude', 'longitude', 'year', 'month', 'day']
features_minmax_transform = pd.DataFrame(data=features_data)
features_minmax_transform[numerical] = scaler.fit_transform(features_data[numerical])
air_selected_store_ids = features_minmax_transform['air_store_id'].unique()

# One-hot encode the remaining categorical columns
features_final = pd.get_dummies(features_minmax_transform)
encoded = list(features_final.columns)
print(len(encoded), ' total features after one-hot encoding.')
print(encoded)
code
2014551/cell_16
[ "text_plain_output_1.png" ]
# Note: train_test_split now lives in sklearn.model_selection
# (sklearn.cross_validation was removed in scikit-learn 0.20)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
airdata = air
airdata['year'] = air['visit_date'].dt.year
airdata['month'] = air['visit_date'].dt.month
airdata['day'] = air['visit_date'].dt.day
airdata = airdata.drop(['visit_date'], axis=1)
airdata = airdata[airdata.air_store_id.isin(air_selected_store_ids)]
visitors_data = airdata['visitors']
features_data = airdata.drop('visitors', axis=1)

scaler = MinMaxScaler()
numerical = ['latitude', 'longitude', 'year', 'month', 'day']
features_minmax_transform = pd.DataFrame(data=features_data)
features_minmax_transform[numerical] = scaler.fit_transform(features_data[numerical])
air_selected_store_ids = features_minmax_transform['air_store_id'].unique()
features_final = pd.get_dummies(features_minmax_transform)
encoded = list(features_final.columns)

# 80/20 split of the engineered features
X_train, X_test, y_train, y_test = train_test_split(features_final, visitors_data, test_size=0.2, random_state=0)
print('Training set has ', X_train.shape[0], ' samples.')
print('Testing set has ', X_test.shape[0], ' samples.')
print(X_train.shape, X_test.shape, len(y_train), len(y_train) / 10, len(y_train) / 100)
code
2014551/cell_14
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
airdata = air
airdata['year'] = air['visit_date'].dt.year
airdata['month'] = air['visit_date'].dt.month
airdata['day'] = air['visit_date'].dt.day
airdata = airdata.drop(['visit_date'], axis=1)
airdata = airdata[airdata.air_store_id.isin(air_selected_store_ids)]
visitors_data = airdata['visitors']
features_data = airdata.drop('visitors', axis=1)

print(len(features_data.air_store_id.unique()))
print(len(features_data.day_of_week.unique()))
print(len(features_data.air_genre_name.unique()))
print(len(features_data.air_area_name.unique()))
print(6)
code
2014551/cell_22
[ "text_html_output_1.png", "text_plain_output_1.png" ]
# GridSearchCV and ShuffleSplit now live in sklearn.model_selection
# (sklearn.grid_search and sklearn.cross_validation were removed in 0.20)
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV, ShuffleSplit
from sklearn import tree
from sklearn.ensemble import AdaBoostRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
from time import time
code
2014551/cell_10
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
airdata = air
airdata['year'] = air['visit_date'].dt.year
airdata['month'] = air['visit_date'].dt.month
airdata['day'] = air['visit_date'].dt.day
airdata = airdata.drop(['visit_date'], axis=1)
airdata.head()
code
2014551/cell_12
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
asi = pd.read_csv('../input/air_store_info.csv')
air = pd.merge(avd_di, asi, on=['air_store_id'])
airdata = air
airdata['year'] = air['visit_date'].dt.year
airdata['month'] = air['visit_date'].dt.month
airdata['day'] = air['visit_date'].dt.day
airdata = airdata.drop(['visit_date'], axis=1)
airdata = airdata[airdata.air_store_id.isin(air_selected_store_ids)]
visitors_data = airdata['visitors']
features_data = airdata.drop('visitors', axis=1)
print('Check the training data: ')
display(visitors_data.head())
display(features_data.head())
code
2014551/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from IPython.display import display
import matplotlib.pyplot as plt
from datetime import date

pd.set_option('display.float_format', lambda x: '%.5f' % x)

sample_result = pd.read_csv('../input/sample_submission.csv')
features_to_train = sample_result.copy(deep=True)
features_to_train['air_store_id'] = features_to_train.id.str.slice(0, 20)
features_to_train['visit_date'] = pd.to_datetime(features_to_train.id.str.slice(21, 31))
features_to_train.drop(['visitors', 'id'], axis=1, inplace=True)
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
features_to_train = pd.merge(features_to_train, di, left_on='visit_date', right_on='calendar_date', how='left')
features_to_train.drop(['calendar_date'], axis=1, inplace=True)
airstore = pd.read_csv('../input/air_store_info.csv')
features_to_train = pd.merge(features_to_train, airstore, on=['air_store_id'])
result_data_stub = features_to_train
features_to_train['year'] = features_to_train['visit_date'].dt.year
features_to_train['month'] = features_to_train['visit_date'].dt.month
features_to_train['day'] = features_to_train['visit_date'].dt.day
features_to_train = features_to_train.drop(['visit_date'], axis=1)
air_selected_store_ids = features_to_train.air_store_id.unique()

avd = pd.read_csv('../input/air_visit_data.csv', parse_dates=['visit_date'])
di = pd.read_csv('../input/date_info.csv', parse_dates=['calendar_date'])
display(avd.head(2))
display(di.head(2))
avd_di = pd.merge(avd, di, left_on='visit_date', right_on='calendar_date', how='left')
avd_di.drop(['calendar_date'], axis=1, inplace=True)
display(avd_di.head(2))
print('The AIR visitor data count', len(avd), '. The AIR visitor data with holiday info : ', len(avd_di), 'records.')
print('Total AIR restaurants in AVD', len(avd.air_store_id.value_counts()))
code
16148624/cell_21
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.rcParams['figure.figsize'] = (16, 10)
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
corrplot = sns.heatmap(df_train[num_features].corr(), cmap=plt.cm.Reds, annot=True)
g = sns.FacetGrid(df_train, col='Survived')
g.map(sns.distplot, 'Pclass')
sns.boxplot(data=df, x='Fare', y='Pclass', orient='h')
code
16148624/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
# Survived.sum() counts survivors, so the message reports survivors
print('{:.2f}% survival rate, {} out of {} survived'.format(df_train.Survived.sum() / len(df_train) * 100, df_train.Survived.sum(), len(df_train)))
code
16148624/cell_25
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.rcParams['figure.figsize'] = (16, 10)
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
corrplot = sns.heatmap(df_train[num_features].corr(), cmap=plt.cm.Reds, annot=True)
g = sns.FacetGrid(df_train, col='Survived')
g.map(sns.distplot, 'Pclass')
df_train.groupby('Pclass').agg(['mean', 'count'])['Survived']
df_train.Age.isnull().sum()
code
16148624/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.info()
code
16148624/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
code
16148624/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
df_train[num_features].describe()
code
16148624/cell_19
[ "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
df_train.groupby('Pclass').agg(['mean', 'count'])['Survived']
code
16148624/cell_18
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.rcParams['figure.figsize'] = (16, 10)
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
corrplot = sns.heatmap(df_train[num_features].corr(), cmap=plt.cm.Reds, annot=True)
g = sns.FacetGrid(df_train, col='Survived')
g.map(sns.distplot, 'Pclass')
code
16148624/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.rcParams['figure.figsize'] = (16, 10)
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
corrplot = sns.heatmap(df_train[num_features].corr(), cmap=plt.cm.Reds, annot=True)
g = sns.FacetGrid(df_train, col='Survived')
g.map(sns.distplot, 'Pclass')
df_train.groupby('Pclass').agg(['mean', 'count'])['Survived']
df_train.Age.isnull().sum()
df_train.Age.isnull().sum()
code
16148624/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
print('{} numerical features:\n{} \nand {} categorical features:\n{}'.format(len(num_features), num_features, len(cat_features), cat_features))
code
16148624/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
abs(df_train[num_features].corr()['Survived']).sort_values(ascending=False)
code
16148624/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['figure.figsize'] = (16, 10)
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
corrplot = sns.heatmap(df_train[num_features].corr(), cmap=plt.cm.Reds, annot=True)
g = sns.FacetGrid(df_train, col='Survived')
g.map(sns.distplot, 'Pclass')
df_train.groupby('Pclass').agg(['mean', 'count'])['Survived']
sns.distplot(df_train[df_train.Age.notnull()].Age)
code
16148624/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['figure.figsize'] = (16, 10)
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
corrplot = sns.heatmap(df_train[num_features].corr(), cmap=plt.cm.Reds, annot=True)
code
16148624/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_train['dataset'] = 'train'
df_test = pd.read_csv('../input/test.csv')
df_test['dataset'] = 'test'
df = pd.concat([df_train, df_test], sort=True, copy=False)
df_train.nunique().min()
num_features = df_train.select_dtypes(['float64', 'int64']).columns.tolist()
cat_features = df_train.select_dtypes(['object']).columns.tolist()
num_features.remove('PassengerId')
num_features = sorted(num_features)
num_features
code
34130785/cell_13
[ "text_html_output_1.png" ]
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '../input/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8')
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df = df.sample(frac=1).reset_index(drop=True)
max_seq_len = 458
MAX_NB_WORDS = round(int(df.doc_length.sum() / max_seq_len))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, char_level=False)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
word_seq_train = tokenizer.texts_to_sequences(X_train)
word_seq_train = sequence.pad_sequences(word_seq_train, maxlen=max_seq_len)
word_seq_train.shape
code
34130785/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '../input/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8')
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
print(df.groupby(['label'])['label'].count())
df = df.sample(frac=1).reset_index(drop=True)
code
34130785/cell_20
[ "text_plain_output_1.png" ]
from gensim.models import Word2Vec
from keras import optimizers
from keras import regularizers
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Activation, Dropout, Flatten,Input
from keras.layers import Embedding, Conv1D, MaxPooling1D, Dense
from keras.models import Sequential
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
import numpy as np
import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '../input/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8')
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df = df.sample(frac=1).reset_index(drop=True)
max_seq_len = 458
MAX_NB_WORDS = round(int(df.doc_length.sum() / max_seq_len))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, char_level=False)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
word_seq_train = tokenizer.texts_to_sequences(X_train)
word_seq_train = sequence.pad_sequences(word_seq_train, maxlen=max_seq_len)
word_seq_train.shape
gensim_news_desc = []
chunk_data = X_train
for record in range(0, len(chunk_data)):
    news_desc_list = []
    for tok in chunk_data[record].split():
        news_desc_list.append(str(tok))
    gensim_news_desc.append(news_desc_list)
vsize = max_seq_len
vmin_count = 4
gensim_model = Word2Vec(gensim_news_desc, min_count=vmin_count, size=vsize, sg=1)
words = list(gensim_model.wv.vocab)
batch_size = 1024
num_epochs = 10
num_filters = 128
embed_dim = max_seq_len
weight_decay = 0.0001
class_weight = {0: 1, 1: 1}
gensim_words_not_found = []
gensim_nb_words = len(gensim_model.wv.vocab)
gensim_embedding_matrix = np.zeros((gensim_nb_words, embed_dim))
for word, i in word_index.items():
    if i >= gensim_nb_words:
        continue
    if word in gensim_model.wv.vocab:
        embedding_vector = gensim_model.wv[word]
        if embedding_vector is not None and len(embedding_vector) > 0:
            gensim_embedding_matrix[i] = embedding_vector
    else:
        gensim_words_not_found.append(word)
model = Sequential()
model.add(Embedding(gensim_nb_words, embed_dim, weights=[gensim_embedding_matrix], input_length=max_seq_len))
model.add(Conv1D(num_filters, 5, activation='relu', padding='same'))
model.add(MaxPooling1D(2))
model.add(Dense(64, activation='relu', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=4, verbose=1)
callbacks_list = [early_stopping]
hist = model.fit(word_seq_train, y_train, batch_size=batch_size, epochs=num_epochs, callbacks=callbacks_list, validation_split=0.1, shuffle=True, verbose=2, class_weight=class_weight)
code
34130785/cell_11
[ "text_plain_output_1.png" ]
from keras.preprocessing.text import Tokenizer
import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '../input/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8')
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df = df.sample(frac=1).reset_index(drop=True)
max_seq_len = 458
MAX_NB_WORDS = round(int(df.doc_length.sum() / max_seq_len))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, char_level=False)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
print('dictionary size: ', len(word_index))
code
34130785/cell_18
[ "text_plain_output_1.png" ]
from gensim.models import Word2Vec
from keras.preprocessing.text import Tokenizer
import numpy as np
import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '../input/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8')
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df = df.sample(frac=1).reset_index(drop=True)
max_seq_len = 458
MAX_NB_WORDS = round(int(df.doc_length.sum() / max_seq_len))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, char_level=False)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
gensim_news_desc = []
chunk_data = X_train
for record in range(0, len(chunk_data)):
    news_desc_list = []
    for tok in chunk_data[record].split():
        news_desc_list.append(str(tok))
    gensim_news_desc.append(news_desc_list)
vsize = max_seq_len
vmin_count = 4
gensim_model = Word2Vec(gensim_news_desc, min_count=vmin_count, size=vsize, sg=1)
words = list(gensim_model.wv.vocab)
batch_size = 1024
num_epochs = 10
num_filters = 128
embed_dim = max_seq_len
weight_decay = 0.0001
class_weight = {0: 1, 1: 1}
print('preparing embedding matrix...')
gensim_words_not_found = []
gensim_nb_words = len(gensim_model.wv.vocab)
print('gensim_nb_words : ', gensim_nb_words)
gensim_embedding_matrix = np.zeros((gensim_nb_words, embed_dim))
for word, i in word_index.items():
    if i >= gensim_nb_words:
        continue
    if word in gensim_model.wv.vocab:
        embedding_vector = gensim_model.wv[word]
        if embedding_vector is not None and len(embedding_vector) > 0:
            gensim_embedding_matrix[i] = embedding_vector
    else:
        gensim_words_not_found.append(word)
print(gensim_embedding_matrix.shape)
code
34130785/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
print(len(X_train))
print(len(X_test))
print(len(y_train))
print(len(y_test))
code
34130785/cell_15
[ "text_plain_output_1.png" ]
from gensim.models import Word2Vec
max_seq_len = 458
gensim_news_desc = []
chunk_data = X_train
for record in range(0, len(chunk_data)):
    news_desc_list = []
    for tok in chunk_data[record].split():
        news_desc_list.append(str(tok))
    gensim_news_desc.append(news_desc_list)
vsize = max_seq_len
vmin_count = 4
gensim_model = Word2Vec(gensim_news_desc, min_count=vmin_count, size=vsize, sg=1)
print(gensim_model)
code
34130785/cell_17
[ "text_plain_output_1.png" ]
from gensim.models import Word2Vec
max_seq_len = 458
gensim_news_desc = []
chunk_data = X_train
for record in range(0, len(chunk_data)):
    news_desc_list = []
    for tok in chunk_data[record].split():
        news_desc_list.append(str(tok))
    gensim_news_desc.append(news_desc_list)
vsize = max_seq_len
vmin_count = 4
gensim_model = Word2Vec(gensim_news_desc, min_count=vmin_count, size=vsize, sg=1)
words = list(gensim_model.wv.vocab)
words[0:3]
code
34130785/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '../input/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8')
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df = df.sample(frac=1).reset_index(drop=True)
df[['label', 'target_text']].head(5)
code
89135227/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import warnings
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
warnings.simplefilter(action='ignore')
df = pd.read_csv('../input/sepsis-dataset/Dataset.csv')
df.head()
code
89122519/cell_9
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
def max_digit(n, d, r):
    max_number = -1
    n = abs(n)
    while n > 0:
        digit = n % 10
        if digit % d == r:
            if digit > max_number:
                max_number = digit
        n //= 10
    return max_number
def main():
    n = int(input())
    d = int(input())
    r = int(input())
main()
def binom(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res /= m
        for n in range(1, n - k + 1):
            res /= n
        return int(res)
def binomf(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res = float(res / m)
        for n in range(1, n - k + 1):
            res = float(res / n)
        return res
def main():
    n = int(input())
    k = int(input())
main()
def prime(n):
    for i in range(2, n):
        if n % i == 0:
            return False
    else:
        return True
def main():
    continue_or_not = True
    while continue_or_not:
        n = int(input())
        if n <= 0:
            continue_or_not = False
main()
def rich(n):
    total_divisors = 0
    for i in range(1, n):
        if n % i == 0:
            total_divisors += i
    if total_divisors > n:
        return True
    else:
        return False
def main():
    k = abs(int(input('Enter an integer: ')))
main()
def f(x):
    if x < 2:
        return f(17 - abs(x))
    if x > 1:
        divisor = 0
        for i in range(2, x):
            if x % i == 0:
                divisor = i
        if divisor != 0:
            d = divisor
            return f(d)
        else:
            return x
def main():
    continue_or_not = True
    while continue_or_not:
        x = int(input('Enter an integer: '))
        if x != 0:
            print(f(x))
        if x == 0:
            continue_or_not = False
main()
code
89122519/cell_11
[ "text_plain_output_1.png" ]
def max_digit(n, d, r):
    max_number = -1
    n = abs(n)
    while n > 0:
        digit = n % 10
        if digit % d == r:
            if digit > max_number:
                max_number = digit
        n //= 10
    return max_number
def main():
    n = int(input())
    d = int(input())
    r = int(input())
main()
def binom(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res /= m
        for n in range(1, n - k + 1):
            res /= n
        return int(res)
def binomf(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res = float(res / m)
        for n in range(1, n - k + 1):
            res = float(res / n)
        return res
def main():
    n = int(input())
    k = int(input())
main()
def prime(n):
    for i in range(2, n):
        if n % i == 0:
            return False
    else:
        return True
def main():
    continue_or_not = True
    while continue_or_not:
        n = int(input())
        if n <= 0:
            continue_or_not = False
main()
def rich(n):
    total_divisors = 0
    for i in range(1, n):
        if n % i == 0:
            total_divisors += i
    if total_divisors > n:
        return True
    else:
        return False
def main():
    k = abs(int(input('Enter an integer: ')))
main()
def f(x):
    if x < 2:
        return f(17 - abs(x))
    if x > 1:
        divisor = 0
        for i in range(2, x):
            if x % i == 0:
                divisor = i
        if divisor != 0:
            d = divisor
            return f(d)
        else:
            return x
def main():
    continue_or_not = True
    while continue_or_not:
        x = int(input('Enter an integer: '))
        if x == 0:
            continue_or_not = False
main()
def g(x):
    return int(x / 2) + 1
def h(x):
    return int(x / 2 + 1)
def f(x):
    if x < 9:
        return x * x
    if x >= 9:
        if x % 2 == 0:
            return f(g(x))
        else:
            return f(h(x + 1))
'\nThe above code can also write as below: (another way)\ndef g(x):\n    return f(int(x/2)+1)\ndef h(x):\n    return f(int(x/2+1))\ndef f(x):\n    if x<9:\n        return x*x\n    if x>=9:\n        if (x%2==0):\n            return g(x)\n        else:\n            return h(x+1)\n'
def main():
    continue_or_not = True
    while continue_or_not:
        x = int(input('Enter an integer: '))
        if x != 0:
            print(f(x))
        if x == 0:
            continue_or_not = False
main()
code
89122519/cell_1
[ "text_plain_output_1.png" ]
def max_digit(n, d, r):
    max_number = -1
    n = abs(n)
    while n > 0:
        digit = n % 10
        if digit % d == r:
            if digit > max_number:
                max_number = digit
        n //= 10
    return max_number
def main():
    n = int(input())
    d = int(input())
    r = int(input())
    print(max_digit(n, d, r))
main()
code
89122519/cell_7
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
def max_digit(n, d, r):
    max_number = -1
    n = abs(n)
    while n > 0:
        digit = n % 10
        if digit % d == r:
            if digit > max_number:
                max_number = digit
        n //= 10
    return max_number
def main():
    n = int(input())
    d = int(input())
    r = int(input())
main()
def binom(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res /= m
        for n in range(1, n - k + 1):
            res /= n
        return int(res)
def binomf(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res = float(res / m)
        for n in range(1, n - k + 1):
            res = float(res / n)
        return res
def main():
    n = int(input())
    k = int(input())
main()
def prime(n):
    for i in range(2, n):
        if n % i == 0:
            return False
    else:
        return True
def main():
    continue_or_not = True
    while continue_or_not:
        n = int(input())
        if n <= 0:
            continue_or_not = False
main()
def rich(n):
    total_divisors = 0
    for i in range(1, n):
        if n % i == 0:
            total_divisors += i
    if total_divisors > n:
        print(n)
        return True
    else:
        return False
def main():
    k = abs(int(input('Enter an integer: ')))
    for n in range(1, k):
        print(rich(n))
main()
code
89122519/cell_3
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
def max_digit(n, d, r):
    max_number = -1
    n = abs(n)
    while n > 0:
        digit = n % 10
        if digit % d == r:
            if digit > max_number:
                max_number = digit
        n //= 10
    return max_number
def main():
    n = int(input())
    d = int(input())
    r = int(input())
main()
def binom(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res /= m
        for n in range(1, n - k + 1):
            res /= n
        return int(res)
def binomf(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res = float(res / m)
        for n in range(1, n - k + 1):
            res = float(res / n)
        return res
def main():
    n = int(input())
    k = int(input())
    print(binom(n, k))
    print(binomf(n, k))
main()
code
89122519/cell_5
[ "text_plain_output_1.png" ]
def max_digit(n, d, r):
    max_number = -1
    n = abs(n)
    while n > 0:
        digit = n % 10
        if digit % d == r:
            if digit > max_number:
                max_number = digit
        n //= 10
    return max_number
def main():
    n = int(input())
    d = int(input())
    r = int(input())
main()
def binom(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res /= m
        for n in range(1, n - k + 1):
            res /= n
        return int(res)
def binomf(n, k):
    res = 1
    if n < 0 or k < 0:
        return 0
    else:
        for i in range(1, n + 1):
            res *= i
        for m in range(1, k + 1):
            res = float(res / m)
        for n in range(1, n - k + 1):
            res = float(res / n)
        return res
def main():
    n = int(input())
    k = int(input())
main()
def prime(n):
    for i in range(2, n):
        if n % i == 0:
            return False
    else:
        return True
def main():
    continue_or_not = True
    while continue_or_not:
        n = int(input())
        if n > 0:
            print(prime(n))
        if n <= 0:
            continue_or_not = False
main()
code
72092307/cell_23
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.info
y = train['target']
features = train.drop(['target'], axis=1)
col_data = []
for col in features.columns:
    if 'cat' in col:
        col_data.append(col)
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[col_data] = ordinal_encoder.fit_transform(features[col_data])
X_test[col_data] = ordinal_encoder.transform(test[col_data])
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
model = XGBRegressor()
model.fit(X_train, y_train)
preds = model.predict(X_valid)
sub = pd.read_csv('/kaggle/input/30-days-of-ml/sample_submission.csv')
model.fit(X, y)
preds = model.predict(X_test)
sub.target = preds
sub.to_csv('submission.csv', index=False)
print('Saved')
code
72092307/cell_20
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
sub = pd.read_csv('/kaggle/input/30-days-of-ml/sample_submission.csv')
sub.head()
code
72092307/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.head()
code
72092307/cell_19
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
import numpy as np # linear algebra
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
model = XGBRegressor()
model.fit(X_train, y_train)
preds = model.predict(X_valid)
print('RMSE: ', np.sqrt(mean_squared_error(y_valid, preds)))
code
72092307/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72092307/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.describe()
code
72092307/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.info
code
72092307/cell_16
[ "text_html_output_1.png" ]
print(X_train)
code
72092307/cell_17
[ "text_plain_output_1.png" ]
print(y_train)
code
72092307/cell_14
[ "text_html_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.info
y = train['target']
features = train.drop(['target'], axis=1)
col_data = []
for col in features.columns:
    if 'cat' in col:
        col_data.append(col)
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[col_data] = ordinal_encoder.fit_transform(features[col_data])
X_test[col_data] = ordinal_encoder.transform(test[col_data])
X.head()
code
72092307/cell_22
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.info
y = train['target']
features = train.drop(['target'], axis=1)
col_data = []
for col in features.columns:
    if 'cat' in col:
        col_data.append(col)
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[col_data] = ordinal_encoder.fit_transform(features[col_data])
X_test[col_data] = ordinal_encoder.transform(test[col_data])
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
model = XGBRegressor()
model.fit(X_train, y_train)
preds = model.predict(X_valid)
sub = pd.read_csv('/kaggle/input/30-days-of-ml/sample_submission.csv')
model.fit(X, y)
preds = model.predict(X_test)
sub.target = preds
sub.head()
code
72092307/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
train.info
y = train['target']
features = train.drop(['target'], axis=1)
print(y)
code
72092307/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.shape
code
73069645/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/airbnbnewyork/listings.csv')
data.shape
data.isnull().sum()
data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True)
data.fillna({'reviews_per_month': 0}, inplace=True)
data.reviews_per_month.isnull().sum()
data.neighbourhood_group.unique()
len(data.neighbourhood.unique())
data.room_type.unique()
#we will skip first column for now and begin from host_id
#let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service
top_host=data.host_id.value_counts().head(10)
top_host
sns.set(rc={'figure.figsize': (10, 8)})
sns.set_style('white')
top_host_df = pd.DataFrame(top_host)
top_host_df.reset_index(inplace=True)
top_host_df.rename(columns={'index': 'Host_ID', 'host_id': 'P_Count'}, inplace=True)
top_host_df
viz_1 = sns.barplot(x='Host_ID', y='P_Count', data=top_host_df, palette='Blues_d')
viz_1.set_title('Hosts with the most listings in NYC')
viz_1.set_ylabel('Count of listings')
viz_1.set_xlabel('Host IDs')
viz_1.set_xticklabels(viz_1.get_xticklabels(), rotation=45)
code
73069645/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/airbnbnewyork/listings.csv')
data.shape
data.isnull().sum()
data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True)
data.fillna({'reviews_per_month': 0}, inplace=True)
data.reviews_per_month.isnull().sum()
data.neighbourhood_group.unique()
code
73069645/cell_25
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/airbnbnewyork/listings.csv')
data.shape
data.isnull().sum()
data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True)
data.fillna({'reviews_per_month': 0}, inplace=True)
data.reviews_per_month.isnull().sum()
data.neighbourhood_group.unique()
len(data.neighbourhood.unique())
data.room_type.unique()
#we will skip first column for now and begin from host_id
#let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service
top_host=data.host_id.value_counts().head(10)
top_host
top_host_check = data.calculated_host_listings_count.max()
top_host_check
sns.set(rc={'figure.figsize': (10, 8)})
sns.set_style('white')
top_host_df = pd.DataFrame(top_host)
top_host_df.reset_index(inplace=True)
top_host_df.rename(columns={'index': 'Host_ID', 'host_id': 'P_Count'}, inplace=True)
top_host_df
viz_1=sns.barplot(x="Host_ID", y="P_Count", data=top_host_df, palette='Blues_d')
viz_1.set_title('Hosts with the most listings in NYC')
viz_1.set_ylabel('Count of listings')
viz_1.set_xlabel('Host IDs')
viz_1.set_xticklabels(viz_1.get_xticklabels(), rotation=45)
sub_1 = data.loc[data['neighbourhood_group'] == 'Brooklyn']
price_sub1 = sub_1[['price']]
sub_2 = data.loc[data['neighbourhood_group'] == 'Manhattan']
price_sub2 = sub_2[['price']]
sub_3 = data.loc[data['neighbourhood_group'] == 'Queens']
price_sub3 = sub_3[['price']]
sub_4 = data.loc[data['neighbourhood_group'] == 'Staten Island']
price_sub4 = sub_4[['price']]
sub_5 = data.loc[data['neighbourhood_group'] == 'Bronx']
price_sub5 = sub_5[['price']]
price_list_by_n = [price_sub1, price_sub2, price_sub3, price_sub4, price_sub5]
sub_6 = data[data.price < 500]
viz_2 = sns.violinplot(data=sub_6, x='neighbourhood_group', y='price')
viz_2.set_title('Density and distribution of prices for each neighbourhood_group')
code
73069645/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/airbnbnewyork/listings.csv')
data.shape
data.isnull().sum()
data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True)
data.fillna({'reviews_per_month': 0}, inplace=True)
data.reviews_per_month.isnull().sum()
data.neighbourhood_group.unique()
len(data.neighbourhood.unique())
data.room_type.unique()
#we will skip first column for now and begin from host_id
#let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service
top_host=data.host_id.value_counts().head(10)
top_host
top_host_check = data.calculated_host_listings_count.max()
top_host_check
sns.set(rc={'figure.figsize': (10, 8)})
sns.set_style('white')
top_host_df = pd.DataFrame(top_host)
top_host_df.reset_index(inplace=True)
top_host_df.rename(columns={'index': 'Host_ID', 'host_id': 'P_Count'}, inplace=True)
top_host_df
viz_1=sns.barplot(x="Host_ID", y="P_Count", data=top_host_df, palette='Blues_d')
viz_1.set_title('Hosts with the most listings in NYC')
viz_1.set_ylabel('Count of listings')
viz_1.set_xlabel('Host IDs')
viz_1.set_xticklabels(viz_1.get_xticklabels(), rotation=45)
sub_1 = data.loc[data['neighbourhood_group'] == 'Brooklyn']
price_sub1 = sub_1[['price']]
sub_2 = data.loc[data['neighbourhood_group'] == 'Manhattan']
price_sub2 = sub_2[['price']]
sub_3 = data.loc[data['neighbourhood_group'] == 'Queens']
price_sub3 = sub_3[['price']]
sub_4 = data.loc[data['neighbourhood_group'] == 'Staten Island']
price_sub4 = sub_4[['price']]
sub_5 = data.loc[data['neighbourhood_group'] == 'Bronx']
price_sub5 = sub_5[['price']]
price_list_by_n = [price_sub1, price_sub2, price_sub3, price_sub4, price_sub5]
#we can see from our statistical table that we have some extreme values, therefore we need to remove them for the sake of a better visualization
#creating a sub-dataframe with no extreme values / less than 500
sub_6=data[data.price < 500]
#using violinplot to showcase density and distribution of prices
viz_2=sns.violinplot(data=sub_6, x='neighbourhood_group', y='price')
viz_2.set_title('Density and distribution of prices for each neighbourhood_group')
viz_4 = sub_6.plot(kind='scatter', x='longitude', y='latitude', label='availability_365', c='price', cmap=plt.get_cmap('jet'), colorbar=True, alpha=0.4, figsize=(10, 8))
viz_4.legend()
code
73069645/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/airbnbnewyork/listings.csv')
data.shape
data.isnull().sum()
data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True)
data.fillna({'reviews_per_month': 0}, inplace=True)
data.reviews_per_month.isnull().sum()
data.neighbourhood_group.unique()
len(data.neighbourhood.unique())
data.room_type.unique()
#we will skip first column for now and begin from host_id
#let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service
top_host=data.host_id.value_counts().head(10)
top_host
top_host_df = pd.DataFrame(top_host)
top_host_df.reset_index(inplace=True)
top_host_df.rename(columns={'index': 'Host_ID', 'host_id': 'P_Count'}, inplace=True)
top_host_df
code
73069645/cell_6
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/airbnbnewyork/listings.csv')
data.shape
code
73069645/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
code