path: string, lengths 13–17
screenshot_names: sequence, lengths 1–873
code: string, lengths 0–40.4k
cell_type: string class, 1 value ("code")
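The schema above describes one record per notebook cell: the cell's path, the names of its rendered output screenshots, its source code, and its cell type. A minimal loading sketch with pandas, assuming a hypothetical JSONL export of these records (the actual file name is not given in this dump):

import pandas as pd

records = pd.read_json('cells.jsonl', lines=True)  # hypothetical export path
assert {'path', 'screenshot_names', 'code', 'cell_type'} <= set(records.columns)
print(records['cell_type'].value_counts())  # expected: the single class 'code'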
16150255/cell_16
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import math
import matplotlib.pyplot as plt
import pandas as pd  # for loading and processing the dataset
import torch
import torch.nn.functional as F

df_train = pd.read_csv('../input/train.csv')
df_train = df_train.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
# Assumed preprocessing (this dump omits the original steps): encode Sex numerically
# and fill missing ages so the feature matrix can become a float tensor.
df_train['Sex'] = (df_train['Sex'] == 'male').astype(int)
df_train['Age'] = df_train['Age'].fillna(df_train['Age'].median())
X_train = df_train.drop('Survived', axis=1).to_numpy()  # .as_matrix() was removed in pandas 1.0
y_train = df_train['Survived'].to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
X_train, y_train, X_test, y_test = map(torch.tensor, (X_train, y_train, X_test, y_test))

def model(x):
    a1 = torch.matmul(x, weights1) + bias1
    h1 = a1.sigmoid()
    a2 = torch.matmul(h1, weights2) + bias2
    h2 = a2.sigmoid()
    a3 = torch.matmul(h2, weights3) + bias3
    h3 = a3.exp() / a3.exp().sum(-1).unsqueeze(-1)  # softmax over the two classes
    return h3

def loss_fn(y_hat, y):
    # negative log-likelihood of the true class, given softmax probabilities
    return -y_hat[range(y.shape[0]), y].log().mean()

def accuracy(y_hat, y):
    pred = torch.argmax(y_hat, dim=1)
    return (pred == y).float().mean()

torch.manual_seed(0)
weights1 = torch.randn(9, 128) / math.sqrt(2)
weights1.requires_grad_()
bias1 = torch.zeros(128, requires_grad=True)
weights2 = torch.randn(128, 256) / math.sqrt(2)
weights2.requires_grad_()
bias2 = torch.zeros(256, requires_grad=True)
weights3 = torch.randn(256, 2) / math.sqrt(2)
weights3.requires_grad_()
bias3 = torch.zeros(2, requires_grad=True)

learning_rate = 0.2
epochs = 10000
X_train = X_train.float()
y_train = y_train.long()
loss_arr = []
acc_arr = []
for epoch in range(epochs):
    y_hat = model(X_train)
    # the model already applies softmax, so use the hand-written NLL;
    # F.cross_entropy expects raw logits and would apply softmax a second time
    loss = loss_fn(y_hat, y_train)
    loss.backward()
    loss_arr.append(loss.item())
    acc_arr.append(accuracy(y_hat, y_train).item())
    with torch.no_grad():
        weights1 -= weights1.grad * learning_rate
        bias1 -= bias1.grad * learning_rate
        weights2 -= weights2.grad * learning_rate
        bias2 -= bias2.grad * learning_rate
        weights3 -= weights3.grad * learning_rate
        bias3 -= bias3.grad * learning_rate
        weights1.grad.zero_()
        bias1.grad.zero_()
        weights2.grad.zero_()
        bias2.grad.zero_()
        weights3.grad.zero_()
        bias3.grad.zero_()

plt.plot(loss_arr, 'r-')
plt.plot(acc_arr, 'b-')
plt.show()
print('Loss before training', loss_arr[0])
print('Loss after training', loss_arr[-1])
code
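The manual update loop above re-implements plain SGD by hand. As a sketch only (assuming the model, loss_fn, parameter tensors, X_train, y_train, and epochs defined in the cell above), the same training step with torch.optim:

import torch

params = [weights1, bias1, weights2, bias2, weights3, bias3]
optimizer = torch.optim.SGD(params, lr=0.2)  # same step size as the manual loop

for epoch in range(epochs):
    loss = loss_fn(model(X_train), y_train)
    optimizer.zero_grad()   # clear gradients left over from the previous step
    loss.backward()         # populate .grad on every parameter
    optimizer.step()        # in-place SGD update, equivalent to the manual subtraction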
16150255/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split
import math
import matplotlib.pyplot as plt
import pandas as pd  # for loading and processing the dataset
import torch
import torch.nn.functional as F

df_train = pd.read_csv('../input/train.csv')
df_train = df_train.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
# Assumed preprocessing (this dump omits the original steps): encode Sex numerically
# and fill missing ages so the feature matrix can become a float tensor.
df_train['Sex'] = (df_train['Sex'] == 'male').astype(int)
df_train['Age'] = df_train['Age'].fillna(df_train['Age'].median())
X_train = df_train.drop('Survived', axis=1).to_numpy()  # .as_matrix() was removed in pandas 1.0
y_train = df_train['Survived'].to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
X_train, y_train, X_test, y_test = map(torch.tensor, (X_train, y_train, X_test, y_test))

def model(x):
    a1 = torch.matmul(x, weights1) + bias1
    h1 = a1.sigmoid()
    a2 = torch.matmul(h1, weights2) + bias2
    h2 = a2.sigmoid()
    a3 = torch.matmul(h2, weights3) + bias3
    h3 = a3.exp() / a3.exp().sum(-1).unsqueeze(-1)  # softmax over the two classes
    return h3

def loss_fn(y_hat, y):
    # negative log-likelihood of the true class, given softmax probabilities
    return -y_hat[range(y.shape[0]), y].log().mean()

def accuracy(y_hat, y):
    pred = torch.argmax(y_hat, dim=1)
    return (pred == y).float().mean()

torch.manual_seed(0)
weights1 = torch.randn(9, 128) / math.sqrt(2)
weights1.requires_grad_()
bias1 = torch.zeros(128, requires_grad=True)
weights2 = torch.randn(128, 256) / math.sqrt(2)
weights2.requires_grad_()
bias2 = torch.zeros(256, requires_grad=True)
weights3 = torch.randn(256, 2) / math.sqrt(2)
weights3.requires_grad_()
bias3 = torch.zeros(2, requires_grad=True)

learning_rate = 0.2
epochs = 10000
X_train = X_train.float()
y_train = y_train.long()
loss_arr = []
acc_arr = []
for epoch in range(epochs):
    y_hat = model(X_train)
    # the model already applies softmax, so use the hand-written NLL;
    # F.cross_entropy expects raw logits and would apply softmax a second time
    loss = loss_fn(y_hat, y_train)
    loss.backward()
    loss_arr.append(loss.item())
    acc_arr.append(accuracy(y_hat, y_train).item())
    with torch.no_grad():
        weights1 -= weights1.grad * learning_rate
        bias1 -= bias1.grad * learning_rate
        weights2 -= weights2.grad * learning_rate
        bias2 -= bias2.grad * learning_rate
        weights3 -= weights3.grad * learning_rate
        bias3 -= bias3.grad * learning_rate
        weights1.grad.zero_()
        bias1.grad.zero_()
        weights2.grad.zero_()
        bias2.grad.zero_()
        weights3.grad.zero_()
        bias3.grad.zero_()

weights3.grad
code
16150255/cell_12
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # for loading and processing the dataset

df_train = pd.read_csv('../input/train.csv')
df_train = df_train.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
X_train = df_train.drop('Survived', axis=1).to_numpy()  # .as_matrix() was removed in pandas 1.0
y_train = df_train['Survived'].to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
y_train
code
48165864/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
df.loc[(df['Port Code'] == 103) | (df['Port Code'] == 3302)]
code
48165864/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.describe(include='all')
code
48165864/cell_9
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.info()
code
48165864/cell_34
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
df.loc[(df['Port Code'] == 103) | (df['Port Code'] == 3302)]
df.loc[(df['Port Name'] == 'Eastport') & (df['State'] == 'Maine'), 'Port Name'] = 'Eastport Maine'
Date = pd.to_datetime(df['Date'])
Date
df2 = pd.DataFrame(df)
df2

def longitude(x):
    # Location strings look like 'POINT (lon lat)'; keep the text inside the parentheses
    b1 = x.split('(')[1]
    b2 = b1.split(')')
    for i in b2:
        return i.split()[0]

def latitude(x):
    b1 = x.split('(')[1]
    b2 = b1.split(')')
    for i in b2:
        return i.split()[1]

df2['latitude'] = df2.Location.apply(latitude)
df2['longitude'] = df2.Location.apply(longitude)
Border_count = df2.Border.value_counts()
Border_count
code
48165864/cell_20
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
port = df[['Port Name', 'Port Code']].drop_duplicates()
port[port['Port Name'].duplicated(keep=False)]
code
48165864/cell_6
[ "text_html_output_1.png" ]
import os
import warnings
warnings.filterwarnings('ignore')

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt

sns.set(palette='Set1')
code
48165864/cell_29
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
df.loc[(df['Port Code'] == 103) | (df['Port Code'] == 3302)]
df.loc[(df['Port Name'] == 'Eastport') & (df['State'] == 'Maine'), 'Port Name'] = 'Eastport Maine'
Date = pd.to_datetime(df['Date'])
Date
df2 = pd.DataFrame(df)
df2
df2['Year'] = Date.dt.year
df2['Month'] = Date.dt.month
df2
code
48165864/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
df.loc[(df['Port Code'] == 103) | (df['Port Code'] == 3302)]
df.loc[(df['Port Name'] == 'Eastport') & (df['State'] == 'Maine'), 'Port Name'] = 'Eastport Maine'
Date = pd.to_datetime(df['Date'])
Date
code
48165864/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
code
48165864/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
df.loc[(df['Port Code'] == 103) | (df['Port Code'] == 3302)]
df.loc[(df['Port Name'] == 'Eastport') & (df['State'] == 'Maine'), 'Port Name'] = 'Eastport Maine'
Date = pd.to_datetime(df['Date'])
Date
df2 = pd.DataFrame(df)
df2

def longitude(x):
    # Location strings look like 'POINT (lon lat)'; keep the text inside the parentheses
    b1 = x.split('(')[1]
    b2 = b1.split(')')
    for i in b2:
        return i.split()[0]

def latitude(x):
    b1 = x.split('(')[1]
    b2 = b1.split(')')
    for i in b2:
        return i.split()[1]

df2['latitude'] = df2.Location.apply(latitude)
df2['longitude'] = df2.Location.apply(longitude)
df2
code
48165864/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
code
48165864/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
print('Unique values in Port Name: ' + str(df['Port Name'].nunique()))
print('Unique values in Port Code: ' + str(df['Port Code'].nunique()))
code
48165864/cell_35
[ "text_html_output_1.png" ]
from matplotlib import pyplot as plt  # needed for the pie chart below
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
df.loc[(df['Port Code'] == 103) | (df['Port Code'] == 3302)]
df.loc[(df['Port Name'] == 'Eastport') & (df['State'] == 'Maine'), 'Port Name'] = 'Eastport Maine'
Date = pd.to_datetime(df['Date'])
Date
df2 = pd.DataFrame(df)
df2

def longitude(x):
    # Location strings look like 'POINT (lon lat)'; keep the text inside the parentheses
    b1 = x.split('(')[1]
    b2 = b1.split(')')
    for i in b2:
        return i.split()[0]

def latitude(x):
    b1 = x.split('(')[1]
    b2 = b1.split(')')
    for i in b2:
        return i.split()[1]

df2['latitude'] = df2.Location.apply(latitude)
df2['longitude'] = df2.Location.apply(longitude)
Border_count = df2.Border.value_counts()
Border_count
plt.figure(figsize=(7, 6))
plt.pie(x=Border_count.values, explode=[0.03, 0.03], labels=Border_count.index, autopct='%0.2f%%')
plt.title('Composition of the Borders')
plt.show()
code
48165864/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
code
48165864/cell_27
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/border-crossing-entry-data/Border_Crossing_Entry_Data.csv')
df
df.isnull().sum()
df.duplicated().sum()
df.loc[(df['Port Code'] == 103) | (df['Port Code'] == 3302)]
df.loc[(df['Port Name'] == 'Eastport') & (df['State'] == 'Maine'), 'Port Name'] = 'Eastport Maine'
Date = pd.to_datetime(df['Date'])
Date
df2 = pd.DataFrame(df)
df2
code
16124388/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
dataset['Gender_code'] = np.where(dataset['Gender'] == 'Male', 1, 0)  # numeric gender code, created earlier in the notebook
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(dataset[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]))
df.columns = ['age', 'income', 'spending']
df.insert(0, 'gender', dataset['Gender_code'])
df.head()
code
16124388/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
print(dataset.keys())
print(len(dataset))
print(dataset.head())
code
16124388/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
dataset.describe().transpose()
code
16124388/cell_11
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns  # needed for the distplot calls below

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
dataset['Gender_code'] = np.where(dataset['Gender'] == 'Male', 1, 0)  # numeric gender code, created earlier in the notebook
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(dataset[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]))
df.columns = ['age', 'income', 'spending']
df.insert(0, 'gender', dataset['Gender_code'])

plot_gender = sns.distplot(df['gender'], label='gender', color='grey')
plot_age = sns.distplot(df['age'], label='age', color='blue')
plot_income = sns.distplot(df['income'], label='income', color='lightgreen')
plot_spend = sns.distplot(df['spending'], label='spend', color='orange')
plt.xlabel('')
plt.legend()
plt.show()
code
16124388/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
print(dataset['Gender'].unique())
dataset['Gender_code'] = np.where(dataset['Gender'] == 'Male', 1, 0)
code
16124388/cell_15
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns  # needed for the distplot/violinplot calls below

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
dataset['Gender_code'] = np.where(dataset['Gender'] == 'Male', 1, 0)  # numeric gender code, created earlier in the notebook
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(dataset[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]))
df.columns = ['age', 'income', 'spending']
df.insert(0, 'gender', dataset['Gender_code'])

# Histograms
plot_gender = sns.distplot(df['gender'], label='gender', color='grey')
plot_age = sns.distplot(df['age'], label='age', color='blue')
plot_income = sns.distplot(df['income'], label='income', color='lightgreen')
plot_spend = sns.distplot(df['spending'], label='spend', color='orange')
plt.xlabel('')
plt.legend()
plt.show()

# Violin plot
f, axes = plt.subplots(2, 2, figsize=(12, 6), sharex=True, sharey=True)
v1 = sns.violinplot(data=df, x='gender', color='gray', ax=axes[0, 0])
v2 = sns.violinplot(data=df, x='age', color='skyblue', ax=axes[0, 1])
v3 = sns.violinplot(data=df, x='income', color='lightgreen', ax=axes[1, 0])
v4 = sns.violinplot(data=df, x='spending', color='pink', ax=axes[1, 1])

wcss = []
k_s = [i * i for i in range(1, 8)]  # square-number cluster counts: 1, 4, 9, ..., 49
print(k_s)
for i in k_s:
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(df)
    wcss.append(km.inertia_)
plt.plot(k_s, wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('wcss')
plt.show()
code
16124388/cell_16
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns  # needed for the distplot/violinplot calls below

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
dataset['Gender_code'] = np.where(dataset['Gender'] == 'Male', 1, 0)  # numeric gender code, created earlier in the notebook
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(dataset[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]))
df.columns = ['age', 'income', 'spending']
df.insert(0, 'gender', dataset['Gender_code'])

# Histograms
plot_gender = sns.distplot(df['gender'], label='gender', color='grey')
plot_age = sns.distplot(df['age'], label='age', color='blue')
plot_income = sns.distplot(df['income'], label='income', color='lightgreen')
plot_spend = sns.distplot(df['spending'], label='spend', color='orange')
plt.xlabel('')
plt.legend()
plt.show()

# Violin plot
f, axes = plt.subplots(2, 2, figsize=(12, 6), sharex=True, sharey=True)
v1 = sns.violinplot(data=df, x='gender', color='gray', ax=axes[0, 0])
v2 = sns.violinplot(data=df, x='age', color='skyblue', ax=axes[0, 1])
v3 = sns.violinplot(data=df, x='income', color='lightgreen', ax=axes[1, 0])
v4 = sns.violinplot(data=df, x='spending', color='pink', ax=axes[1, 1])

wcss = []
k_s = [i * i for i in range(1, 8)]  # square-number cluster counts: 1, 4, 9, ..., 49
for i in k_s:
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(df)
    wcss.append(km.inertia_)

wcss = []
k_s = [4, 7, 9]
print(k_s)
for i in k_s:
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(df)
    wcss.append(km.inertia_)
plt.plot(k_s, wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('wcss')
plt.show()
code
16124388/cell_17
[ "image_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns  # needed for the distplot/violinplot calls below

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
dataset['Gender_code'] = np.where(dataset['Gender'] == 'Male', 1, 0)  # numeric gender code, created earlier in the notebook
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(dataset[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]))
df.columns = ['age', 'income', 'spending']
df.insert(0, 'gender', dataset['Gender_code'])

# Histograms
plot_gender = sns.distplot(df['gender'], label='gender', color='grey')
plot_age = sns.distplot(df['age'], label='age', color='blue')
plot_income = sns.distplot(df['income'], label='income', color='lightgreen')
plot_spend = sns.distplot(df['spending'], label='spend', color='orange')
plt.xlabel('')
plt.legend()
plt.show()

# Violin plot
f, axes = plt.subplots(2, 2, figsize=(12, 6), sharex=True, sharey=True)
v1 = sns.violinplot(data=df, x='gender', color='gray', ax=axes[0, 0])
v2 = sns.violinplot(data=df, x='age', color='skyblue', ax=axes[0, 1])
v3 = sns.violinplot(data=df, x='income', color='lightgreen', ax=axes[1, 0])
v4 = sns.violinplot(data=df, x='spending', color='pink', ax=axes[1, 1])

wcss = []
k_s = [i * i for i in range(1, 8)]  # square-number cluster counts: 1, 4, 9, ..., 49
for i in k_s:
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(df)
    wcss.append(km.inertia_)

wcss = []
k_s = [4, 7, 9]
for i in k_s:
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(df)
    wcss.append(km.inertia_)

wcss = []
k_s = [4, 6, 7]
print(k_s)
for i in k_s:
    km = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(df)
    wcss.append(km.inertia_)
plt.plot(k_s, wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('wcss')
plt.show()
code
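The three elbow-method cells above sweep irregular k lists (square numbers, then [4, 7, 9], then [4, 6, 7]). A sketch of the conventional sweep over consecutive k values, assuming the scaled df from the cells above and the same KMeans settings:

import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

wcss = []
ks = list(range(1, 11))  # consecutive candidate cluster counts
for k in ks:
    km = KMeans(n_clusters=k, init='k-means++', max_iter=300, n_init=10, random_state=0)
    km.fit(df)
    wcss.append(km.inertia_)  # within-cluster sum of squares

plt.plot(ks, wcss, marker='o')
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('wcss')
plt.show()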
16124388/cell_12
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns  # needed for the distplot/violinplot calls below

dataset = pd.read_csv('../input/Mall_Customers.csv')
pd.set_option('display.max_columns', 10)
dataset['Gender_code'] = np.where(dataset['Gender'] == 'Male', 1, 0)  # numeric gender code, created earlier in the notebook
scaler = StandardScaler()
df = pd.DataFrame(scaler.fit_transform(dataset[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]))
df.columns = ['age', 'income', 'spending']
df.insert(0, 'gender', dataset['Gender_code'])

# Histograms
plot_gender = sns.distplot(df['gender'], label='gender', color='grey')
plot_age = sns.distplot(df['age'], label='age', color='blue')
plot_income = sns.distplot(df['income'], label='income', color='lightgreen')
plot_spend = sns.distplot(df['spending'], label='spend', color='orange')
plt.xlabel('')
plt.legend()
plt.show()

# Violin plot
f, axes = plt.subplots(2, 2, figsize=(12, 6), sharex=True, sharey=True)
v1 = sns.violinplot(data=df, x='gender', color='gray', ax=axes[0, 0])
v2 = sns.violinplot(data=df, x='age', color='skyblue', ax=axes[0, 1])
v3 = sns.violinplot(data=df, x='income', color='lightgreen', ax=axes[1, 0])
v4 = sns.violinplot(data=df, x='spending', color='pink', ax=axes[1, 1])
code
130011352/cell_3
[ "text_plain_output_1.png" ]
import json
import os
import numpy as np
import pandas as pd

# Collect every top-level source key that appears across the input JSON files.
source_list = set()
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        with open(os.path.join(dirname, filename), 'r') as file:
            json_file = json.load(file)
            for record in json_file:
                source_list.add(record)

df_dictionary = {}
for record in source_list:
    df_dictionary[record] = []

# Build one DataFrame per source, tagging each row with its city (the file stem).
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        name = filename.split('.')
        city = name[0]
        with open(os.path.join(dirname, filename), 'r') as file:
            json_file = json.load(file)
            for source in json_file:
                temp_df = pd.DataFrame(json_file[source])
                temp_df['city'] = city
                df_dictionary[source].append(temp_df)

for df in df_dictionary:
    result = pd.concat(df_dictionary[df])
    df_dictionary[df] = result

airbnbHotels = df_dictionary['airbnbHotels']
bookingHotels = df_dictionary['bookingHotels']
hotelsComHotels = df_dictionary['hotelsComHotels']
L = bookingHotels['price'].apply(lambda x: x['value'])
print(np.mean(L))
code
130011352/cell_5
[ "text_plain_output_1.png" ]
import json
import os
import numpy as np
import pandas as pd

# Collect every top-level source key that appears across the input JSON files.
source_list = set()
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        with open(os.path.join(dirname, filename), 'r') as file:
            json_file = json.load(file)
            for record in json_file:
                source_list.add(record)

df_dictionary = {}
for record in source_list:
    df_dictionary[record] = []

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        name = filename.split('.')
        city = name[0]
        with open(os.path.join(dirname, filename), 'r') as file:
            json_file = json.load(file)
            for source in json_file:
                temp_df = pd.DataFrame(json_file[source])
                temp_df['city'] = city
                df_dictionary[source].append(temp_df)

for df in df_dictionary:
    result = pd.concat(df_dictionary[df])
    df_dictionary[df] = result

airbnbHotels = df_dictionary['airbnbHotels']
bookingHotels = df_dictionary['bookingHotels']
hotelsComHotels = df_dictionary['hotelsComHotels']
L = bookingHotels['price'].apply(lambda x: x['value'])
# pd.value_counts(...) is deprecated; call .value_counts() on the Series instead
print(airbnbHotels['price'].apply(lambda x: x['currency']).value_counts())
print(np.mean(airbnbHotels['price'].apply(lambda x: x['value'])))
print(bookingHotels['price'].apply(lambda x: x['currency']).value_counts())
print(np.mean(bookingHotels['price'].apply(lambda x: x['value'])))
print(hotelsComHotels['price'].apply(lambda x: x['currency']).value_counts())
print(np.mean(hotelsComHotels['price'].apply(lambda x: x['value'])))
code
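The price field in these frames is a nested dict with value and currency keys, read out above with repeated .apply calls. A sketch of flattening it once with pandas.json_normalize instead, assuming the bookingHotels frame from the cell above:

import pandas as pd

price = pd.json_normalize(bookingHotels['price'].tolist())  # one flat column per key
bookingHotels = bookingHotels.assign(price_value=price['value'].to_numpy(),
                                     price_currency=price['currency'].to_numpy())
print(bookingHotels['price_currency'].value_counts())
print(bookingHotels['price_value'].mean())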
1005853/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

# tick_params expects booleans; the 'off'/'on' string form is deprecated
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.tight_layout()

plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
plt.xlim([-1, 2])
ax.set_xticks([0, 1])
ax.set_xticklabels(['Not survived', 'Survived'], rotation='vertical')
plt.tight_layout()

y_surv = [len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
y_not_surv = [len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
(y_surv, y_not_surv)

x = np.array([1, 2, 3])
width = 0.3
fig, ax = plt.subplots()
bar1 = ax.bar(x - width, y_surv, width, color='lightblue', label='Survived')
bar2 = ax.bar(x, y_not_surv, width, color='pink', label='Not survived')
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.xlim([0, 4])
plt.ylabel('Count')
plt.grid(True)
plt.legend(loc='upper left')

# 'Age_group' is a binned-Age column created earlier in the notebook
counts = train_data.groupby(['Age_group', 'Survived']).Age_group.count().unstack()
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)

y_surv_2 = [len(train_data[(train_data['Survived'] == 1) & (train_data['Embarked'] == 'S')]['Embarked'].tolist()),
            len(train_data[(train_data['Survived'] == 1) & (train_data['Embarked'] == 'C')]['Embarked'].tolist()),
            len(train_data[(train_data['Survived'] == 1) & (train_data['Embarked'] == 'Q')]['Embarked'].tolist())]
y_not_surv_2 = [len(train_data[(train_data['Survived'] == 0) & (train_data['Embarked'] == 'S')]['Embarked'].tolist()),
                len(train_data[(train_data['Survived'] == 0) & (train_data['Embarked'] == 'C')]['Embarked'].tolist()),
                len(train_data[(train_data['Survived'] == 0) & (train_data['Embarked'] == 'Q')]['Embarked'].tolist())]
(y_surv_2, y_not_surv_2)
code
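The survived/not-survived counts above are assembled with repeated boolean filters; pd.crosstab produces the same contingency tables in one call each. A sketch, assuming train_data from the cell above:

import pandas as pd

print(pd.crosstab(train_data['Pclass'], train_data['Survived']))    # class vs. outcome counts
print(pd.crosstab(train_data['Embarked'], train_data['Survived']))  # port vs. outcome counts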
1005853/cell_13
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
sum(train_data[train_data['Survived'] == 1]['Age'].isnull()) / len(train_data)
code
1005853/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
train_data['Pclass'].unique()
code
1005853/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
train_data.describe()
code
1005853/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

# tick_params expects booleans; the 'off'/'on' string form is deprecated
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.tight_layout()

plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
plt.xlim([-1, 2])
ax.set_xticks([0, 1])
ax.set_xticklabels(['Not survived', 'Survived'], rotation='vertical')
plt.tight_layout()

y_surv = [len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
y_not_surv = [len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
(y_surv, y_not_surv)

x = np.array([1, 2, 3])
width = 0.3
fig, ax = plt.subplots()
bar1 = ax.bar(x - width, y_surv, width, color='lightblue', label='Survived')
bar2 = ax.bar(x, y_not_surv, width, color='pink', label='Not survived')
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.xlim([0, 4])
plt.ylabel('Count')
plt.grid(True)
plt.legend(loc='upper left')
code
1005853/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

# tick_params expects booleans; the 'off'/'on' string form is deprecated
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.tight_layout()

plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
plt.xlim([-1, 2])
ax.set_xticks([0, 1])
ax.set_xticklabels(['Not survived', 'Survived'], rotation='vertical')
plt.tight_layout()

y_surv = [len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
y_not_surv = [len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
(y_surv, y_not_surv)

x = np.array([1, 2, 3])
width = 0.3
fig, ax = plt.subplots()
bar1 = ax.bar(x - width, y_surv, width, color='lightblue', label='Survived')
bar2 = ax.bar(x, y_not_surv, width, color='pink', label='Not survived')
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.xlim([0, 4])
plt.ylabel('Count')
plt.grid(True)
plt.legend(loc='upper left')

# 'Age_group' is a binned-Age column created earlier in the notebook
counts = train_data.groupby(['Age_group', 'Survived']).Age_group.count().unstack()
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)

train_data['Embarked'].value_counts()
code
1005853/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

plt.hist(train_data['Pclass'], color='lightblue')
# tick_params expects booleans; the 'off'/'on' string form is deprecated
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.xlabel('Pclass')
plt.ylabel('Count')
plt.grid(True)
plt.tight_layout()
code
1005853/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

# tick_params expects booleans; the 'off'/'on' string form is deprecated
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.tight_layout()

plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
plt.xlim([-1, 2])
ax.set_xticks([0, 1])
ax.set_xticklabels(['Not survived', 'Survived'], rotation='vertical')
plt.tight_layout()

y_surv = [len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
y_not_surv = [len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
(y_surv, y_not_surv)

x = np.array([1, 2, 3])
width = 0.3
fig, ax = plt.subplots()
bar1 = ax.bar(x - width, y_surv, width, color='lightblue', label='Survived')
bar2 = ax.bar(x, y_not_surv, width, color='pink', label='Not survived')
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.xlim([0, 4])
plt.ylabel('Count')
plt.grid(True)
plt.legend(loc='upper left')

# 'Age_group' is a binned-Age column created earlier in the notebook
counts = train_data.groupby(['Age_group', 'Survived']).Age_group.count().unstack()
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)

sum(train_data['Embarked'].isnull())
code
1005853/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

# tick_params expects booleans; the 'off'/'on' string form is deprecated
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.tight_layout()

plt.hist(train_data['Survived'], color='lightblue')
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
plt.grid(True)
plt.xlim([-1, 2])
ax.set_xticks([0, 1])
ax.set_xticklabels(['Not survived', 'Survived'], rotation='vertical')
plt.ylabel('Count')
plt.tight_layout()
code
1005853/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

# tick_params expects booleans; the 'off'/'on' string form is deprecated
plt.tick_params(top=False, bottom=True, left=False, right=False, labelleft=True, labelbottom=True)
plt.xlim([0, 4])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.tight_layout()

plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
plt.xlim([-1, 2])
ax.set_xticks([0, 1])
ax.set_xticklabels(['Not survived', 'Survived'], rotation='vertical')
plt.tight_layout()

y_surv = [len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
y_not_surv = [len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
(y_surv, y_not_surv)

x = np.array([1, 2, 3])
width = 0.3
fig, ax = plt.subplots()
bar1 = ax.bar(x - width, y_surv, width, color='lightblue', label='Survived')
bar2 = ax.bar(x, y_not_surv, width, color='pink', label='Not survived')
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
ax.set_xticks([1, 2, 3])
plt.xlim([0, 4])
plt.ylabel('Count')
plt.grid(True)
plt.legend(loc='upper left')

# 'Age_group' is a binned-Age column created earlier in the notebook
counts = train_data.groupby(['Age_group', 'Survived']).Age_group.count().unstack()
counts.plot(kind='bar', stacked=True, color=['lightblue', 'pink'])
plt.tick_params(top=False, bottom=False, left=False, right=False, labelleft=True, labelbottom=True)
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.spines['bottom'].set_visible(True)
plt.grid(True)
code
1005853/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
sum(train_data[train_data['Survived'] == 0]['Age'].isnull()) / len(train_data)
code
1005853/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)

y_surv = [len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
          len(train_data[(train_data['Survived'] == 1) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
y_not_surv = [len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 1)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 2)]['Pclass'].tolist()),
              len(train_data[(train_data['Survived'] == 0) & (train_data['Pclass'] == 3)]['Pclass'].tolist())]
(y_surv, y_not_surv)
code
1005853/cell_12
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
sum(train_data['Age'].isnull()) / len(train_data)
code
1005853/cell_5
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], inplace=True, axis=1)
train_data.head()
code
2017164/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
test.shape
test.dtypes
code
2017164/cell_25
[ "text_html_output_1.png" ]
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
train.shape
test.shape
train.dtypes
test.dtypes
test.fillna('missing', inplace=True)

target_labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
X = train.comment_text
test_X = test.comment_text

vect = TfidfVectorizer(max_features=50000, min_df=2)
X_dtm = vect.fit_transform(X)
test_X_dtm = vect.transform(test_X)

etc = ExtraTreesClassifier(n_jobs=-1, random_state=3)
for label in target_labels:
    print('... Processing {}'.format(label))
    y = train[label]
    etc.fit(X_dtm, y)
    y_pred_X = etc.predict(X_dtm)
    print('Training accuracy is {}'.format(accuracy_score(y, y_pred_X)))
    test_y_prob = etc.predict_proba(test_X_dtm)[:, 1]  # probability of the positive class
    sub[label] = test_y_prob
code
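Training accuracy on these heavily imbalanced labels is a weak signal. A sketch of scoring each label with ROC AUC on a held-out split instead, assuming X_dtm, train, and target_labels from the cell above:

import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

idx_tr, idx_val = train_test_split(np.arange(X_dtm.shape[0]), test_size=0.2, random_state=3)
for label in target_labels:
    clf = ExtraTreesClassifier(n_jobs=-1, random_state=3)
    clf.fit(X_dtm[idx_tr], train[label].to_numpy()[idx_tr])
    val_prob = clf.predict_proba(X_dtm[idx_val])[:, 1]  # probability of the positive class
    print(label, roc_auc_score(train[label].to_numpy()[idx_val], val_prob))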
2017164/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
test.shape
code
2017164/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
train.shape
test.shape
train.dtypes
test.dtypes
test.fillna('missing', inplace=True)
X = train.comment_text
test_X = test.comment_text
print(X.shape, test_X.shape)
code
2017164/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
test.shape
test.head()
code
2017164/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
2017164/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
sub.head()
code
2017164/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
train.shape
train.dtypes
code
2017164/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # needed for plt.cm, plt.figure, plt.title below
import seaborn as sns

# `data` (the numeric feature/target frame) is assembled in an earlier notebook cell
colormap = plt.cm.plasma
plt.figure(figsize=(8, 8))
plt.title('Correlation of features & targets', y=1.05, size=14)
sns.heatmap(data.astype(float).corr(), linewidths=0.1, vmax=1.0, square=True,
            cmap=colormap, linecolor='white', annot=True)
code
2017164/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
train.shape
code
2017164/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
test.shape
test.dtypes
test[test['comment_text'].isnull()]
code
2017164/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
train.shape
train.head()
code
105218033/cell_21
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]

num_pipeline = Pipeline([('num_imputer', SimpleImputer(strategy='median')),
                         ('std_scaler', StandardScaler())])
cat_pipeline = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')),
                         ('encode', OneHotEncoder())])
num_labels = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_labels = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num_trans', num_pipeline, num_labels),
                                   ('cat_trans', cat_pipeline, cat_labels)])
x_train_prepared = full_pipeline.fit_transform(x_train)

def model_fit_and_print_acc(model):
    model.fit(x_train_prepared, y_train)
    pred = model.predict(x_train_prepared)
    print(accuracy_score(pred, y_train))  # training accuracy
    cv_scores = cross_val_score(model, x_train_prepared, y_train, cv=3, scoring='accuracy')
    print(cv_scores.mean())  # mean 3-fold cross-validated accuracy

param_grid = [{'n_estimators': [150, 200, 250], 'max_features': [5, 10], 'max_depth': [5, 10, 20],
               'min_samples_leaf': [5, 10, 20], 'min_samples_split': [5, 10, 20]}]
forest_clf = RandomForestClassifier()
grid_search = GridSearchCV(forest_clf, param_grid, cv=3, scoring='accuracy', return_train_score=True)
grid_search.fit(x_train_prepared, y_train)
best_search_rf = grid_search.best_estimator_
grid_search.best_params_
model_fit_and_print_acc(best_search_rf)
code
105218033/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]
x_train.describe()
code
105218033/cell_23
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]

num_pipeline = Pipeline([('num_imputer', SimpleImputer(strategy='median')),
                         ('std_scaler', StandardScaler())])
cat_pipeline = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')),
                         ('encode', OneHotEncoder())])
num_labels = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_labels = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num_trans', num_pipeline, num_labels),
                                   ('cat_trans', cat_pipeline, cat_labels)])
x_train_prepared = full_pipeline.fit_transform(x_train)

def model_fit_and_print_acc(model):
    model.fit(x_train_prepared, y_train)
    pred = model.predict(x_train_prepared)
    print(accuracy_score(pred, y_train))  # training accuracy
    cv_scores = cross_val_score(model, x_train_prepared, y_train, cv=3, scoring='accuracy')
    print(cv_scores.mean())  # mean 3-fold cross-validated accuracy

poly_log_reg = Pipeline([('poly_features', PolynomialFeatures(degree=3, include_bias=False)),
                         ('log_reg', LogisticRegression(penalty='elasticnet', l1_ratio=0.7,
                                                        max_iter=100000, solver='saga'))])

param_grid = [{'n_estimators': [150, 200, 250], 'max_features': [5, 10], 'max_depth': [5, 10, 20],
               'min_samples_leaf': [5, 10, 20], 'min_samples_split': [5, 10, 20]}]
forest_clf = RandomForestClassifier()
grid_search = GridSearchCV(forest_clf, param_grid, cv=3, scoring='accuracy', return_train_score=True)
grid_search.fit(x_train_prepared, y_train)
best_search_rf = grid_search.best_estimator_
grid_search.best_params_

knn = KNeighborsClassifier(20)
voting = VotingClassifier(estimators=[('poly_logres', poly_log_reg),
                                      ('grid_randomforest', best_search_rf),
                                      ('knn', knn)],
                          voting='hard')
voting.fit(x_train_prepared, y_train)
pred = voting.predict(x_train_prepared)
print(accuracy_score(pred, y_train))
cv_scores = cross_val_score(voting, x_train_prepared, y_train, cv=3, scoring='accuracy')
cv_scores.mean()
code
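voting='hard' counts class votes only; since all three members expose predict_proba, averaging probabilities is a one-argument change. A sketch of the soft-voting variant, assuming the estimators and prepared data from the cell above:

from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score

soft_voting = VotingClassifier(estimators=[('poly_logres', poly_log_reg),
                                           ('grid_randomforest', best_search_rf),
                                           ('knn', knn)],
                               voting='soft')  # average predicted probabilities
soft_voting.fit(x_train_prepared, y_train)
print(cross_val_score(soft_voting, x_train_prepared, y_train, cv=3, scoring='accuracy').mean())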
105218033/cell_20
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]

num_pipeline = Pipeline([('num_imputer', SimpleImputer(strategy='median')),
                         ('std_scaler', StandardScaler())])
cat_pipeline = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')),
                         ('encode', OneHotEncoder())])
num_labels = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_labels = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num_trans', num_pipeline, num_labels),
                                   ('cat_trans', cat_pipeline, cat_labels)])
x_train_prepared = full_pipeline.fit_transform(x_train)

def model_fit_and_print_acc(model):
    model.fit(x_train_prepared, y_train)
    pred = model.predict(x_train_prepared)
    print(accuracy_score(pred, y_train))  # training accuracy
    cv_scores = cross_val_score(model, x_train_prepared, y_train, cv=3, scoring='accuracy')
    print(cv_scores.mean())  # mean 3-fold cross-validated accuracy

param_grid = [{'n_estimators': [150, 200, 250], 'max_features': [5, 10], 'max_depth': [5, 10, 20],
               'min_samples_leaf': [5, 10, 20], 'min_samples_split': [5, 10, 20]}]
forest_clf = RandomForestClassifier()
grid_search = GridSearchCV(forest_clf, param_grid, cv=3, scoring='accuracy', return_train_score=True)
grid_search.fit(x_train_prepared, y_train)
best_search_rf = grid_search.best_estimator_
print(best_search_rf)
grid_search.best_params_
code
105218033/cell_19
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]

num_pipeline = Pipeline([('num_imputer', SimpleImputer(strategy='median')),
                         ('std_scaler', StandardScaler())])
cat_pipeline = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')),
                         ('encode', OneHotEncoder())])
num_labels = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_labels = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num_trans', num_pipeline, num_labels),
                                   ('cat_trans', cat_pipeline, cat_labels)])
x_train_prepared = full_pipeline.fit_transform(x_train)

def model_fit_and_print_acc(model):
    model.fit(x_train_prepared, y_train)
    pred = model.predict(x_train_prepared)
    print(accuracy_score(pred, y_train))  # training accuracy
    cv_scores = cross_val_score(model, x_train_prepared, y_train, cv=3, scoring='accuracy')
    print(cv_scores.mean())  # mean 3-fold cross-validated accuracy

forest_clf = RandomForestClassifier()
model_fit_and_print_acc(forest_clf)
code
105218033/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]
x_train.info()
code
105218033/cell_18
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd

train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]

num_pipeline = Pipeline([('num_imputer', SimpleImputer(strategy='median')),
                         ('std_scaler', StandardScaler())])
cat_pipeline = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')),
                         ('encode', OneHotEncoder())])
num_labels = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_labels = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num_trans', num_pipeline, num_labels),
                                   ('cat_trans', cat_pipeline, cat_labels)])
x_train_prepared = full_pipeline.fit_transform(x_train)

def model_fit_and_print_acc(model):
    model.fit(x_train_prepared, y_train)
    pred = model.predict(x_train_prepared)
    print(accuracy_score(pred, y_train))  # training accuracy
    cv_scores = cross_val_score(model, x_train_prepared, y_train, cv=3, scoring='accuracy')
    print(cv_scores.mean())  # mean 3-fold cross-validated accuracy

poly_log_reg = Pipeline([('poly_features', PolynomialFeatures(degree=3, include_bias=False)),
                         ('log_reg', LogisticRegression(penalty='elasticnet', l1_ratio=0.7,
                                                        max_iter=100000, solver='saga'))])
model_fit_and_print_acc(poly_log_reg)
code
105218033/cell_8
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]
x_test.info()
code
105218033/cell_17
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
num_pipeline = Pipeline([('num_imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())])
from sklearn.preprocessing import OneHotEncoder
cat_pipeline = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')), ('encode', OneHotEncoder())])
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import PolynomialFeatures
num_labels = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_labels = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num_trans', num_pipeline, num_labels), ('cat_trans', cat_pipeline, cat_labels)])
x_train_prepared = full_pipeline.fit_transform(x_train)
def model_fit_and_print_acc(model):
    model.fit(x_train_prepared, y_train)
    pred = model.predict(x_train_prepared)
    cv_scores = cross_val_score(model, x_train_prepared, y_train, cv=3, scoring='accuracy')
    # report resubstitution accuracy alongside 3-fold CV accuracy
    print('train accuracy: {:.4f}'.format(accuracy_score(y_train, pred)))
    print('cv accuracy: {:.4f}'.format(cv_scores.mean()))
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
log_reg = LogisticRegression()
model_fit_and_print_acc(log_reg)
code
105218033/cell_22
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
labels = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
x_train = x_train[labels]
x_test = x_test[labels]
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
num_pipeline = Pipeline([('num_imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())])
from sklearn.preprocessing import OneHotEncoder
cat_pipeline = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')), ('encode', OneHotEncoder())])
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import PolynomialFeatures
num_labels = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_labels = ['Sex', 'Embarked']
full_pipeline = ColumnTransformer([('num_trans', num_pipeline, num_labels), ('cat_trans', cat_pipeline, cat_labels)])
x_train_prepared = full_pipeline.fit_transform(x_train)
def model_fit_and_print_acc(model):
    model.fit(x_train_prepared, y_train)
    pred = model.predict(x_train_prepared)
    cv_scores = cross_val_score(model, x_train_prepared, y_train, cv=3, scoring='accuracy')
    # report resubstitution accuracy alongside 3-fold CV accuracy
    print('train accuracy: {:.4f}'.format(accuracy_score(y_train, pred)))
    print('cv accuracy: {:.4f}'.format(cv_scores.mean()))
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(20)
model_fit_and_print_acc(knn)
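# Sketch (assumed, not in the original cell): scan a few neighbour counts with the same
# 3-fold CV to see how sensitive KNN is to the choice of k.
for k in (5, 10, 20, 40):
    scores = cross_val_score(KNeighborsClassifier(k), x_train_prepared, y_train, cv=3, scoring='accuracy')
    print(k, round(scores.mean(), 4))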
code
105218033/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
x_train = pd.read_csv('/kaggle/input/titanic/train.csv')
x_test = pd.read_csv('/kaggle/input/titanic/test.csv')
y_train = np.array(x_train['Survived'].copy())
id_test = np.array(x_test['PassengerId'].copy())
x_train.head()
code
72097528/cell_9
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
from sklearn.model_selection import train_test_split
y = train['target']
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
object_cols = [col for col in X.columns if X[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
label_X_train[good_label_cols] = enc.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = enc.transform(X_valid[good_label_cols])
label_X_train.head()
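# Optional check (assumed, not in the original cell): the fitted encoder exposes the
# category-to-integer mapping it learned for each encoded column.
for col, cats in zip(good_label_cols, enc.categories_):
    print(col, list(cats))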
code
72097528/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
# note: isna and isnull are aliases, so this sums every missing value twice
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
print('Missing values - train data: {}'.format(missing_train))
print('Missing values - test data: {}'.format(missing_test))
code
72097528/cell_6
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
from sklearn.model_selection import train_test_split
y = train['target']
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
object_cols = [col for col in X.columns if X[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
if good_label_cols == object_cols:
    print('All object columns are good!')
else:
    print('These are the bad object columns: ', bad_label_cols)
code
72097528/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72097528/cell_15
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
from sklearn.model_selection import train_test_split
y = train['target']
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
object_cols = [col for col in X.columns if X[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
label_X_train[good_label_cols] = enc.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = enc.transform(X_valid[good_label_cols])
low_cardinality_cols = [col for col in object_cols if X_train[col].nunique() < 10]
high_cardinality_cols = list(set(object_cols) - set(low_cardinality_cols))
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(sparse=False)
OH_X_train = pd.DataFrame(enc.fit_transform(X_train[low_cardinality_cols]))
OH_X_valid = pd.DataFrame(enc.transform(X_valid[low_cardinality_cols]))
OH_X_train.index = X_train.index
OH_X_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_X_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_X_valid], axis=1)
OH_X_train.describe()
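# Sanity check (assumed, not in the original cell): rows must still line up after the concat,
# and the encoder adds one column per category seen in the low-cardinality features.
print(OH_X_train.shape, OH_X_valid.shape)
print('one-hot columns added:', sum(X_train[col].nunique() for col in low_cardinality_cols))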
code
72097528/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
train.describe()
code
72097528/cell_17
[ "text_html_output_1.png" ]
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
from sklearn.model_selection import train_test_split
y = train['target']
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
object_cols = [col for col in X.columns if X[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
label_X_train[good_label_cols] = enc.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = enc.transform(X_valid[good_label_cols])
low_cardinality_cols = [col for col in object_cols if X_train[col].nunique() < 10]
high_cardinality_cols = list(set(object_cols) - set(low_cardinality_cols))
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(sparse=False)
OH_X_train = pd.DataFrame(enc.fit_transform(X_train[low_cardinality_cols]))
OH_X_valid = pd.DataFrame(enc.transform(X_valid[low_cardinality_cols]))
OH_X_train.index = X_train.index
OH_X_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_X_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_X_valid], axis=1)
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(label_X_train)
X_scaled = pd.DataFrame(scaler.transform(label_X_train), columns=label_X_train.columns, index=label_X_train.index)
X_scaled
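# Quick check (assumed, not in the original cell): after standardisation each column
# should have mean ~0 and standard deviation ~1.
print(X_scaled.mean().abs().max(), X_scaled.std().max())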
code
72097528/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
from sklearn.model_selection import train_test_split
y = train['target']
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
object_cols = [col for col in X.columns if X[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
label_X_train[good_label_cols] = enc.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = enc.transform(X_valid[good_label_cols])
low_cardinality_cols = [col for col in object_cols if X_train[col].nunique() < 10]
high_cardinality_cols = list(set(object_cols) - set(low_cardinality_cols))
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(sparse=False)
OH_X_train = pd.DataFrame(enc.fit_transform(X_train[low_cardinality_cols]))
OH_X_valid = pd.DataFrame(enc.transform(X_valid[low_cardinality_cols]))
OH_X_train.index = X_train.index
OH_X_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_X_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_X_valid], axis=1)
OH_X_train.head()
code
72097528/cell_10
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
from sklearn.model_selection import train_test_split
y = train['target']
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
object_cols = [col for col in X.columns if X[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
label_X_train[good_label_cols] = enc.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = enc.transform(X_valid[good_label_cols])
label_X_train.describe()
code
72097528/cell_12
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', index_col='id')
missing_train = train.isna().sum().sum() + train.isnull().sum().sum()
missing_test = test.isna().sum().sum() + test.isnull().sum().sum()
from sklearn.model_selection import train_test_split
y = train['target']
X = train.drop(['target'], axis=1)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
object_cols = [col for col in X.columns if X[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OrdinalEncoder
enc = OrdinalEncoder()
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
label_X_train[good_label_cols] = enc.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = enc.transform(X_valid[good_label_cols])
low_cardinality_cols = [col for col in object_cols if X_train[col].nunique() < 10]
high_cardinality_cols = list(set(object_cols) - set(low_cardinality_cols))
print('Categorical columns that will be one-hot encoded:', low_cardinality_cols)
print('\nCategorical columns that will be dropped from the dataset:', high_cardinality_cols)
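# Context for the split above (assumed check, not in the original cell): the per-column
# cardinalities that drive the < 10 threshold.
print(X_train[object_cols].nunique().sort_values())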
code
74060776/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
ndf = df.copy()
ndf['Decision'] = ndf['Decision'].astype('category')
ndf['Decision'] = ndf['Decision'].cat.codes
sns.heatmap(ndf.corr() ** 2, annot=True, cmap='viridis')
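# Equivalent tabular view (assumed, not in the original cell): the squared correlation
# of each feature with the encoded Decision target, sorted.
print((ndf.corr()['Decision'] ** 2).sort_values(ascending=False))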
code
74060776/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
df.describe()
code
74060776/cell_6
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
plt.figure(figsize=(12, 6))
sns.kdeplot(data=df[df['Decision'] == 'admit'], x='GMAT', shade=True, label='Admitted')
sns.kdeplot(data=df[df['Decision'] == 'border'], x='GMAT', color='green', shade=True, label='Border')
sns.kdeplot(data=df[df['Decision'] == 'notadmit'], x='GMAT', color='orange', shade=True, label='Declined')
plt.title('GMAT Distributions')
plt.legend()
code
74060776/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
df.head()
code
74060776/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74060776/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
plt.figure(figsize=(12, 6))
sns.kdeplot(data=df[df['Decision'] == 'admit'], x='GPA', shade=True, label='Admitted')
sns.kdeplot(data=df[df['Decision'] == 'border'], x='GPA', color='green', shade=True, label='Border')
sns.kdeplot(data=df[df['Decision'] == 'notadmit'], x='GPA', color='orange', shade=True, label='Declined')
plt.title('GPA Distributions')
plt.legend()
code
74060776/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
sns.jointplot(data=df, x='GPA', y='GMAT', hue='Decision')
code
74060776/cell_16
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
# rebuild the target encoding, scaling and split this cell relies on
# (defined in the notebook's other cells, e.g. cell_17)
ndf = df.copy()
ndf['Decision'] = ndf['Decision'].astype('category')
ndf['Decision'] = ndf['Decision'].cat.codes
X = ndf.drop('Decision', axis=1)
y = ndf['Decision']
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
rfr = RandomForestClassifier()
knn = KNeighborsClassifier()
logmod = LogisticRegression()
rfr.fit(X_train, y_train)
knn.fit(X_train, y_train)
logmod.fit(X_train, y_train)
error = []
for k in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    p = knn.predict(X_test)
    error.append(np.mean(y_test != p))
plt.plot(range(1, 40), error, ls='--', marker='X', markerfacecolor='red')
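# Reading off the elbow (assumed helper, not in the original cell): the k with the
# lowest held-out error rate in the scan above.
best_k = int(np.argmin(error)) + 1
print('lowest mean error at k =', best_k)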
code
74060776/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
df.info()
code
74060776/cell_17
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
ndf = df.copy()
ndf['Decision'] = ndf['Decision'].astype('category')
ndf['Decision'] = ndf['Decision'].cat.codes
X = ndf.drop('Decision', axis=1)
y = ndf['Decision']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)
# an initial split is assumed before the fits below; the cell as extracted
# only splits inside the resampling loop further down
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
rfr = RandomForestClassifier()
knn = KNeighborsClassifier()
logmod = LogisticRegression()
rfr.fit(X_train, y_train)
knn.fit(X_train, y_train)
logmod.fit(X_train, y_train)
error = []
for k in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    p = knn.predict(X_test)
    error.append(np.mean(y_test != p))
score = []
for i in range(1, 50):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
    knn = KNeighborsClassifier(n_neighbors=3)
    knn.fit(X_train, y_train)
    score.append(knn.score(X_test, y_test))
np.array(score).mean()
code
74060776/cell_14
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd
# rebuild the features, target and split the fits below rely on (this cell as
# extracted depends on earlier cells of the same notebook, e.g. cell_17)
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
ndf = df.copy()
ndf['Decision'] = ndf['Decision'].astype('category')
ndf['Decision'] = ndf['Decision'].cat.codes
X = StandardScaler().fit_transform(ndf.drop('Decision', axis=1))
y = ndf['Decision']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
rfr = RandomForestClassifier()
knn = KNeighborsClassifier()
logmod = LogisticRegression()
rfr.fit(X_train, y_train)
knn.fit(X_train, y_train)
logmod.fit(X_train, y_train)
print('Random Forest Regressor Score: {:.2f}'.format(rfr.score(X_test, y_test)))
print('K Nearest Neighbors Score: {:.2f}'.format(knn.score(X_test, y_test)))
print('Logistic Regression Score: {:.2f}'.format(logmod.score(X_test, y_test)))
code
74060776/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
ndf = df.copy()
ndf['Decision'] = ndf['Decision'].astype('category')
ndf['Decision'] = ndf['Decision'].cat.codes
ndf.head()
code
74060776/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/mba-admission/Admission.csv')
df['Decision'].unique()
code
73061015/cell_21
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result2 = model.fit(x_train, y_train, batch_size=128, epochs=20)
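# Held-out check (sketch, not in the original cell): evaluate the second model on the
# test split, which was normalised and one-hot encoded above.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('test accuracy:', test_acc)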
code
73061015/cell_13
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
import matplotlib.pyplot as plt
# x_train is assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
plt.figure(figsize=(8, 8))
for i in range(10):
    plt.subplot(5, 5, i + 1)
    plt.title(i)
    plt.imshow(x_train[i].reshape(32, 32, 3))
code
73061015/cell_23
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
import matplotlib.pyplot as plt
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result2 = model.fit(x_train, y_train, batch_size=128, epochs=20)
metrics = ['loss', 'accuracy']
plt.figure(figsize=(10, 5))
for i in range(len(metrics)):
    metric = metrics[i]
    plt.subplot(1, 2, i + 1)
    plt.title(metric)
    plt_result1 = result1.history[metric]
    plt_result2 = result2.history[metric]
    plt.plot(plt_result1, label='1st model')
    plt.plot(plt_result2, label='2nd model')
    plt.legend()
plt.show()
code
73061015/cell_26
[ "image_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
import matplotlib.pyplot as plt
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result2 = model.fit(x_train, y_train, batch_size=128, epochs=20)
metrics = ['loss', 'accuracy']
for i in range(len(metrics)):
    metric = metrics[i]
    plt_result1 = result1.history[metric]
    plt_result2 = result2.history[metric]
plt.imshow(x_test[[98]].reshape(32, 32, 3))
code
73061015/cell_19
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
code
73061015/cell_7
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
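# Shape check (assumed, not in the original cell): 50,000 train and 10,000 test
# images of 32x32x3 with integer class labels.
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)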
code
73061015/cell_28
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
import numpy as np
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result2 = model.fit(x_train, y_train, batch_size=128, epochs=20)
prediction = model.predict(x_test[[98]])
prediction
names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
list1 = []
[list1.append(i) for i in range(26)]
list2 = []
[list2.append(i) for i in names]
dic = dict(zip(list1, list2))
print('The answer is', dic[np.argmax(prediction)], '!')
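# Equivalent mapping (illustrative): zip stops at the shorter list, so the 26 indices
# collapse to the 10 class names and dic is simply dict(enumerate(names)).
print(dic == dict(enumerate(names)))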
code
73061015/cell_15
[ "image_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
# x_train/y_train/x_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
code
73061015/cell_17
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
code
73061015/cell_31
[ "text_plain_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
import numpy as np
import pandas as pd
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result2 = model.fit(x_train, y_train, batch_size=128, epochs=20)
prediction = model.predict(x_test[[98]])
prediction
names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
list1 = []
[list1.append(i) for i in range(26)]
list2 = []
[list2.append(i) for i in names]
dic = dict(zip(list1, list2))
predictions = model.predict(x_test)
results = np.argmax(predictions, axis=1)
results = pd.Series(results, name='Label')
results.tail()
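# A possible next step (sketch; the output file name is an assumption, not from the
# notebook): pair each predicted label with a 1-based image id and save the table.
submission = pd.concat([pd.Series(range(1, len(results) + 1), name='ImageId'), results], axis=1)
submission.to_csv('cifar10_predictions.csv', index=False)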
code
73061015/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
import keras as kr # alias assumed: this cell calls kr.utils.to_categorical below
# x_train/y_train/x_test/y_test are assumed to come from the CIFAR-10 load in cell_7
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
y_train = y_train.astype('float32')
x_test = x_test.astype('float32') / 255
y_train = kr.utils.to_categorical(y_train, 10)
y_test = kr.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result1 = model.fit(x_train, y_train, batch_size=128, epochs=20)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
result2 = model.fit(x_train, y_train, batch_size=128, epochs=20)
prediction = model.predict(x_test[[98]])
prediction
code