kernel_id    int64           24.2k … 23.3M
prompt       stringlengths   8 … 1.85M
completion   stringlengths   1 … 182k
comp_name    stringlengths   5 … 57
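Each record below lists a kernel_id, a prompt cell (ending in a pseudo-tag such as <predict_on_test>), the paired completion cell, and the competition name. A minimal sketch of iterating the dump, assuming it has been exported locally with the column names above (the file name "kernels.parquet" is hypothetical):

import pandas as pd

# Hypothetical file name; the dump's actual location is not given here.
df = pd.read_parquet("kernels.parquet")
for _, row in df.head(3).iterrows():
    print(row["kernel_id"], row["comp_name"])
    print(row["prompt"][-80:])      # tail of the prompt, including its pseudo-tag
    print(row["completion"][:80])   # head of the paired completion cell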
4,188,803
def GRU_model_fasttext(): global max_len, num_tokens, embedding_weights_fasttext inputs = layers.Input(shape=(max_len,)) x = layers.Embedding(input_dim=num_tokens, output_dim=embedding_dim, embeddings_initializer=keras.initializers.Constant(embedding_weights_fasttext), trainable=False)(inputs) x = layers.SpatialDropout1D(0.3)(x) forward_layer = layers.GRU(64, return_sequences=True) backward_layer = layers.GRU(64, activation="relu", dropout=0.3, return_sequences=True, go_backwards=True) x = layers.Bidirectional(forward_layer, backward_layer=backward_layer)(x) avg_pool = layers.GlobalAveragePooling1D()(x) max_pool = layers.GlobalMaxPooling1D()(x) x = layers.concatenate([avg_pool, max_pool]) outputs = layers.Dense(units=6, activation='sigmoid')(x) model = keras.models.Model(inputs=inputs, outputs=outputs, name="GRU_model") model.compile(optimizer=tf.optimizers.Adam(), loss=tf.losses.BinaryCrossentropy(), metrics=['AUC']) return model GRU_model_fasttext = GRU_model_fasttext() GRU_model_fasttext.summary() history = GRU_model_fasttext.fit(x_train, y_train, epochs=2, batch_size=32, validation_data=(x_val, y_val))<predict_on_test>
plot_model(GRU_model_fasttext, to_file='model.png', show_shapes=True, show_layer_names=True) Image("model.png")
Digit Recognizer
4,188,803
model_nums = 2 size1 = x_train.shape[0] y_train_pred = np.zeros((model_nums, size1, 6), dtype="float32") y_train_pred[0] = GRU_model_fasttext.predict(x_train) y_train_pred[1] = GRU_model_glove.predict(x_train) size2 = X_test.shape[0] y_test_pred = np.zeros((model_nums, size2, 6), dtype="float32") y_test_pred[0] = GRU_model_fasttext.predict(X_test) y_test_pred[1] = GRU_model_glove.predict(X_test) y_pred = np.zeros((size2, 6), dtype="float32") for i in range(6): lg = LogisticRegression() temp = np.zeros((size1, model_nums), dtype="float32") for j in range(model_nums): temp[:, j] = y_train_pred[j, :, i] lg.fit(temp, y_train[bad_comment_cat[i]]) temp = np.zeros((size2, model_nums), dtype="float32") for j in range(model_nums): temp[:, j] = y_test_pred[j, :, i] y_pred[:, i] = lg.predict_proba(temp)[:, 1]<save_to_csv>
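For reference, the cell above blends the fastText and GloVe GRU models with one logistic-regression stacker per output category (bad_comment_cat). A minimal standalone sketch of that per-label stacking pattern, on hypothetical toy arrays rather than the kernel's data:

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
base_train = rng.random((100, 2))                  # columns = two base models' predicted probabilities
y = (base_train.mean(axis=1) > 0.5).astype(int)    # toy binary label for illustration
base_test = rng.random((10, 2))

meta = LogisticRegression()
meta.fit(base_train, y)                            # learns how to weight the base models
blended = meta.predict_proba(base_test)[:, 1]      # stacked probability for the label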
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"]) epochs = 30 batch_size = 80
Digit Recognizer
4,188,803
submission_result[bad_comment_cat] = y_pred submission_result.to_csv("submission.csv", index=False)<import_modules>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
4,188,803
import numpy as np import pandas as pd import datetime from xgboost import XGBRegressor from sklearn.model_selection import GridSearchCV, KFold<load_from_csv>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
4,188,803
train = pd.read_csv(r'../input/rossmann-store-sales/train.csv', parse_dates=['Date'], low_memory=False) train.head()<load_from_csv>
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, Y_val), verbose=2, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction])
Digit Recognizer
4,188,803
test = pd.read_csv(r'../input/rossmann-store-sales/test.csv', parse_dates=['Date'], low_memory=False, index_col='Id') test.head()<load_from_csv>
results = model.predict(test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label") submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1) submission.to_csv("cnn_mnist_pre.csv", index=False)
Digit Recognizer
3,811,922
store = pd.read_csv(r'../input/rossmann-store-sales/store.csv', index_col='Store') store.head()<data_type_conversions>
print(tf.__version__)
Digit Recognizer
3,811,922
mean_dist = store['CompetitionDistance'].mean() store.loc[store['CompetitionDistance'].isnull(), 'CompetitionDistance'] = mean_dist store.loc[store['Promo2'] == 0, ['Promo2SinceWeek','Promo2SinceYear']] = 0 store['Promo2SinceWeek'] = store['Promo2SinceWeek'].astype('int') store['Promo2SinceYear'] = store['Promo2SinceYear'].astype('int') store['CompetitionOpen'] = 1 store.loc[store['CompetitionOpenSinceMonth'].isnull(), ['CompetitionOpen', 'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear']] = 0 store['CompetitionOpenSinceMonth'] = store['CompetitionOpenSinceMonth'].astype('int') store['CompetitionOpenSinceYear'] = store['CompetitionOpenSinceYear'].astype('int') label_map = {'PromoInterval': {'Jan,Apr,Jul,Oct': 1, 'Feb,May,Aug,Nov': 2, 'Mar,Jun,Sept,Dec': 3, np.nan: 0}, 'Assortment': {'a': 0, 'b': 1, 'c': 2}} store.replace(label_map, inplace=True)<feature_engineering>
train_df = pd.read_csv('../input/train.csv') test_df = pd.read_csv('../input/test.csv') train_df.head()
Digit Recognizer
3,811,922
train.loc[train['StateHoliday'] != '0', 'StateHoliday'] = '1' train['StateHoliday'] = train['StateHoliday'].astype('int') test.loc[test['StateHoliday'] != '0', 'StateHoliday'] = '1' test['StateHoliday'] = test['StateHoliday'].astype('int') train['year'] = train['Date'].dt.year train['month'] = train['Date'].dt.month train['day'] = train['Date'].dt.day test['year'] = test['Date'].dt.year test['month'] = test['Date'].dt.month test['day'] = test['Date'].dt.day test.loc[test['Open'].isnull(), 'Open'] = 1 test['Open'] = test['Open'].astype('int') train.drop('Customers', axis=1, inplace=True)<choose_model_class>
y_train = train_df.label.values x_train = train_df.drop(columns=["label"]).values x_test = test_df.values x_train[:10]
Digit Recognizer
3,811,922
kfold = KFold(n_splits=5, random_state=2021, shuffle=True) parameters = {'learning_rate': [0.1, 0.2, 0.35]} clf = XGBRegressor(random_state=2021, use_label_encoder=False, n_estimators=100, max_depth=4)<create_dataframe>
x_train = x_train / 255.0 x_test = x_test / 255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(784,)), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ])
Digit Recognizer
3,811,922
submit_frame = pd.DataFrame(columns=['Id','Sales'])<feature_engineering>
class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if logs.get('acc') > 0.997: print(" Reached 99.7% accuracy so cancelling training!") self.model.stop_training = True model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=['accuracy']) model.fit(x_train, y_train, epochs=30)
Digit Recognizer
3,811,922
for st_no in range(1, 1116): train_t = train.loc[train['Store'] == st_no].copy() train_t.drop('Store', axis=1, inplace=True) test_t = test.loc[test['Store'] == st_no].copy() test_t.drop('Store', axis=1, inplace=True) st_t = store.loc[store.index == st_no].iloc[0, :] if test_t.shape[0] > 0: train_t['Promo2'] = 0 train_t['NewPromoAge'] = 0 train_t['Competition'] = 0 test_t['Promo2'] = 0 test_t['NewPromoAge'] = 0 test_t['Competition'] = 0 if st_t['Promo2'] == 1: train_t['Promo2'], train_t['NewPromoAge'] = zip(*train_t['Date'].map(calc_promo)) test_t['Promo2'], test_t['NewPromoAge'] = zip(*test_t['Date'].map(calc_promo)) if st_t['CompetitionOpen']: train_t['Competition'] = train_t['Date'].map(calc_comp) test_t['Competition'] = test_t['Date'].map(calc_comp) train_t.drop('Date', axis=1, inplace=True) test_t.drop('Date', axis=1, inplace=True) X_train_t = train_t.drop('Sales', axis=1) y_train_t = train_t['Sales'] cv = GridSearchCV(clf, param_grid=parameters, cv=kfold, scoring='neg_mean_squared_error') cv.fit(X_train_t, y_train_t) y_pred_t = cv.predict(test_t) y_pred_t[y_pred_t < 0] = 0 out_frame = pd.DataFrame([test_t.index, y_pred_t]).T out_frame.columns = ['Id','Sales'] out_frame['Id'] = out_frame['Id'].astype('int') submit_frame = submit_frame.append(out_frame) print(f'Predicted on : {st_no}, train_rows={train_t.shape[0]}, test_rows={len(y_pred_t)}') else: print(f'Skipped: {st_no}, train_rows={train_t.shape[0]}')<save_to_csv>
classifications = model.predict(x_test)
Digit Recognizer
3,811,922
submit_frame.sort_values('Id', inplace=True) print(submit_frame.shape) print(submit_frame.head()) submit_frame.to_csv(r'submission.csv', index=False)<set_options>
def write_submissions(file_name, imageId, predictions): output = pd.DataFrame({ 'ImageId': imageId, 'Label': predictions }) output.to_csv(file_name, index=False) write_submissions('submission_1.csv', pd.Series(range(1, 28001)), np.argmax(classifications, axis=1))
Digit Recognizer
3,811,922
warnings.filterwarnings('ignore')<load_from_csv>
y_train = train_df.label.values x_train = train_df.drop(columns=["label"]).values x_test = test_df.values
Digit Recognizer
3,811,922
df_train = pd.read_csv('../input/rossmann-store-sales/train.csv')<count_missing_values>
x_train = x_train.reshape(42000, 28, 28, 1) x_test = x_test.reshape(28000, 28, 28, 1) x_train = x_train / 255.0 x_test = x_test / 255.0
Digit Recognizer
3,811,922
df_train.isnull().sum()<count_values>
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(1024, activation='relu'), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary()
Digit Recognizer
3,811,922
df_train['DayOfWeek'].value_counts()<count_unique_values>
model.fit(x_train, y_train, epochs=20)
Digit Recognizer
3,811,922
len(df_train['Store'].unique())<count_values>
classifications = model.predict(x_test) write_submissions('submission_2.csv', pd.Series(range(1, 28001)), np.argmax(classifications, axis=1))
Digit Recognizer
3,811,922
<count_values><EOS>
f, axarr = plt.subplots(3, 4) FIRST_IMAGE = 0 SECOND_IMAGE = 7 THIRD_IMAGE = 8 CONVOLUTION_NUMBER = 1 layer_outputs = [layer.output for layer in model.layers] activation_model = tf.keras.models.Model(inputs=model.input, outputs=layer_outputs) for x in range(0, 4): f1 = activation_model.predict(x_train[FIRST_IMAGE].reshape(1, 28, 28, 1))[x] axarr[0, x].imshow(f1[0, :, :, CONVOLUTION_NUMBER], cmap='inferno') axarr[0, x].grid(False) f2 = activation_model.predict(x_train[SECOND_IMAGE].reshape(1, 28, 28, 1))[x] axarr[1, x].imshow(f2[0, :, :, CONVOLUTION_NUMBER], cmap='inferno') axarr[1, x].grid(False) f3 = activation_model.predict(x_train[THIRD_IMAGE].reshape(1, 28, 28, 1))[x] axarr[2, x].imshow(f3[0, :, :, CONVOLUTION_NUMBER], cmap='inferno') axarr[2, x].grid(False)
Digit Recognizer
3,823,766
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values>
%matplotlib inline np.random.seed(2) sns.set(style='white', context='notebook', palette='deep')
Digit Recognizer
3,823,766
df_train[df_train['StateHoliday'] == '0']['StateHoliday'].value_counts()<count_values>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
3,823,766
df_train['StateHoliday'] = df_train['StateHoliday'].apply(lambda x: 0 if x == '0' else x) df_train['StateHoliday'].value_counts()<load_from_csv>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
3,823,766
df_store = pd.read_csv('../input/rossmann-store-sales/store.csv') df_store.head()<count_missing_values>
Y_train = to_categorical(Y_train, num_classes=10)
Digit Recognizer
3,823,766
df_store.isnull().sum()<count_values>
random_seed = 2
Digit Recognizer
3,823,766
df_store['StoreType'].value_counts()<count_values>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=random_seed)
Digit Recognizer
3,823,766
df_store['Assortment'].value_counts()<count_values>
model = Sequential() model.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu', input_shape=(28,28,1))) model.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu')) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation="softmax"))
Digit Recognizer
3,823,766
df_store['StoreType'] = df_store['StoreType'].apply(lambda x: 1 if x == 'a' else (2 if x == 'b' else (3 if x == 'c' else 4))) df_store['StoreType'].value_counts()<count_values>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
Digit Recognizer
3,823,766
df_store['Assortment'] = df_store['Assortment'].apply(lambda x: 1 if x == 'a' else (2 if x == 'b' else 3)) df_store['Assortment'].value_counts()<feature_engineering>
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
3,823,766
df_store['CompetitionDistance'] = df_store['CompetitionDistance'].fillna(max(df_store['CompetitionDistance'])) df_store.info()<categorify>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
3,823,766
def mapping(features): for feature in features: temp_dict = {} temp_dict = pd.Series(df_store[feature].values, index=df_store['Store']).to_dict() df_train[feature] = df_train['Store'].map(temp_dict)<categorify>
epochs = 29 batch_size = 86
Digit Recognizer
3,823,766
mapping(['StoreType', 'Assortment', 'CompetitionDistance'])<filter>
Digit Recognizer
3,823,766
df_train[df_train['Sales'] == 0]<filter>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
3,823,766
df_train[df_train['Open'] == 0]<count_values>
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, Y_val), verbose=2, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction])
Digit Recognizer
3,823,766
df_train[df_train['Open'] == 0]['Sales'].value_counts()<count_values>
results = model.predict(test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label")
Digit Recognizer
3,823,766
<count_values><EOS>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1) submission.to_csv("cnn_mnist_datagen.csv", index=False)
Digit Recognizer
3,566,234
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values>
import pandas as pd import torch import numpy as np import torch.utils.data import matplotlib.pyplot as plt from scipy.io import loadmat import os import urllib.request
Digit Recognizer
3,566,234
df_train['StateHoliday'] = df_train['StateHoliday'].apply(lambda x: 1 if x == 'a' else (2 if x == 'b' else (3 if x == 'c' else x))) df_train['StateHoliday'].value_counts()<feature_engineering>
test_data = pd.read_csv('../input/test.csv') mnist_path = "mnist-original.mat" mnist_alternative_url = "https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat" response = urllib.request.urlopen(mnist_alternative_url) with open(mnist_path, "wb") as f: content = response.read() f.write(content) mnist_raw = loadmat(mnist_path) X = mnist_raw["data"].T y = mnist_raw["label"][0].astype(np.int64) X_test = test_data.values X.shape, X_test.shape
Digit Recognizer
3,566,234
df_train['DayOfYear'] = df_train['Date'].map(lambda x: datetime.datetime.strptime(str(x), '%Y-%m-%d').timetuple().tm_yday) df_train.head(10)<data_type_conversions>
def pairwise_distances(x, y): x_norm = (x**2).sum(1).view(-1, 1) y_t = torch.transpose(y, 0, 1) y_norm = (y**2).sum(1).view(1, -1) dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t) return torch.clamp(dist, 0.0, np.inf)
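The helper above computes all pairwise squared Euclidean distances at once via the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2·x·y, clamping at zero to absorb small negatives from floating-point cancellation. A minimal CPU check of that identity against torch.cdist, on toy tensors rather than the kernel's data (assumes pairwise_distances as defined above is in scope):

import numpy as np
import torch

a = torch.tensor([[0., 0.], [3., 4.]])
b = torch.tensor([[3., 4.]])
print(pairwise_distances(a, b))   # tensor([[25.], [0.]]) - squared distances
print(torch.cdist(a, b) ** 2)     # same values from the built-in Euclidean distance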
Digit Recognizer
3,566,234
df_train['Date'] = pd.to_datetime(df_train['Date'], format='%Y-%m-%d')<feature_engineering>
%%time cuda_test = torch.from_numpy(X_test).cuda().float() cuda_train = torch.from_numpy(X).cuda().float() ds = torch.utils.data.TensorDataset(cuda_test) _min_dists = [] _arg_min_dists = [] bs = 1000 for batch, in torch.utils.data.DataLoader(ds, batch_size=bs, pin_memory=False): min_dist, arg_min_dist = pairwise_distances(cuda_train, batch).min(0) _min_dists.append(min_dist) _arg_min_dists.append(arg_min_dist)
Digit Recognizer
3,566,234
df_train['Year'] = df_train['Date'].map(lambda x: x.year) df_train.head()<drop_column>
min_dists = torch.cat(_min_dists) arg_min_dists = torch.cat(_arg_min_dists) print(f'Number of not found samples: {len(min_dists[min_dists>0])}')
Digit Recognizer
3,566,234
<prepare_x_and_y><EOS>
sub = pd.read_csv('../input/sample_submission.csv') sub.Label = y[arg_min_dists.cpu()] sub.to_csv('sub.csv', index=False) !head sub.csv
Digit Recognizer
4,795,843
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<split>
from fastai import * from fastai.vision import * from fastai.metrics import accuracy,error_rate
Digit Recognizer
4,795,843
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=53) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=53)<normalization>
class CustomImageItemList(ImageList): def open(self, fn): img = fn.reshape(28,28) img = np.stack((img,)*3, axis=-1) return Image(pil2tensor(img, dtype=np.float32)) @classmethod def from_csv_custom(cls, path:PathOrStr, csv_name:str, imgIdx:int=1, header:str='infer', **kwargs)->'ItemList': df = pd.read_csv(Path(path)/csv_name, header=header) res = super().from_df(df, path=path, cols=0, **kwargs) res.items = df.iloc[:,imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values return res
Digit Recognizer
4,795,843
scaler = preprocessing.StandardScaler()<normalization>
path = '../input'
Digit Recognizer
4,795,843
X_train_scalled = scaler.fit_transform(X_train) X_val_scalled = scaler.transform(X_val) X_test_scalled = scaler.transform(X_test)<choose_model_class>
test = CustomImageItemList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0) data = (CustomImageItemList.from_csv_custom(path=path, csv_name='train.csv') .split_by_rand_pct(.2) .label_from_df(cols='label') .add_test(test, label=0) .databunch(bs=64, num_workers=0) .normalize(imagenet_stats))
Digit Recognizer
4,795,843
linreg = LinearRegression()<train_model>
learn = cnn_learner(data, models.resnet50, metrics=error_rate, model_dir="/tmp/model/")
Digit Recognizer
4,795,843
linreg.fit(X_train_scalled, y_train)<predict_on_test>
learn.fit_one_cycle(4)
Digit Recognizer
4,795,843
y_val_pred = linreg.predict(X_val_scalled)<predict_on_test>
learn.save("model_1", return_path=True)
Digit Recognizer
4,795,843
y_train_pred = linreg.predict(X_train_scalled)<create_dataframe>
learn.fit_one_cycle(1)
Digit Recognizer
4,795,843
data = pd.DataFrame({'Actual':y_val, 'Predicted':y_val_pred}) data<compute_test_metric>
learn.load('model_1')
Digit Recognizer
4,795,843
r2_score(y_val, y_val_pred)<compute_test_metric>
learn.lr_find()
Digit Recognizer
4,795,843
r2_score(y_train, y_train_pred)<compute_test_metric>
learn.unfreeze() learn.fit_one_cycle(10, max_lr=slice(1e-5,1e-4))
Digit Recognizer
4,795,843
mae = metrics.mean_absolute_error(y_val, y_val_pred) mse = metrics.mean_squared_error(y_val, y_val_pred) rmse = np.sqrt(metrics.mean_squared_error(y_val, y_val_pred)) print("Mean Absolute Error") print(mae) print() print("Mean Squared Error") print(mse) print() print("Root Mean Squared Error") print(rmse)<create_dataframe>
predictions, *_ = learn.get_preds(DatasetType.Test) labels = np.argmax(predictions, 1) submission_df = pd.DataFrame({'ImageId': list(range(1, len(labels)+1)), 'Label': labels}) submission_df.to_csv('submission.csv', index=False)
Digit Recognizer
5,056,419
evaluation = pd.DataFrame()<create_dataframe>
%matplotlib inline
Digit Recognizer
5,056,419
def evaluation_df(method, mae, mse, rmse, evaluation): temp_evaluation = pd.DataFrame({'Method':[method], 'MAE': [mae], 'MSE': [mse], 'RMSE': [rmse]}) evaluation = pd.concat([evaluation, temp_evaluation]) evaluation = evaluation[['Method', 'MAE', 'MSE', 'RMSE']] return evaluation<create_dataframe>
test = pd.read_csv(".. /input/test.csv") train = pd.read_csv(".. /input/train.csv" )
Digit Recognizer
5,056,419
evaluation = evaluation_df('Linear Regression', mae, mse, rmse, evaluation)<install_modules>
y_train = train["label"] y_train.head()
Digit Recognizer
5,056,419
!pip install xgboost<train_model>
x_train = train.drop(labels=["label"], axis=1)
Digit Recognizer
5,056,419
%%time xgbreg = xgb.XGBRegressor() xgbreg.fit(X_train_scalled, y_train)<compute_test_metric>
y_train.value_counts()
Digit Recognizer
5,056,419
xgbreg.score(X_train_scalled, y_train)<predict_on_test>
x_train = x_train/255.0 test = test/255.0
Digit Recognizer
5,056,419
y_train_pred = xgbreg.predict(X_train_scalled)<compute_test_metric>
x_train = x_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1) print('x_train shape:', x_train.shape)
Digit Recognizer
5,056,419
r2_score(y_train, y_train_pred)<predict_on_test>
y_train = keras.utils.to_categorical(y_train, num_classes=10)
Digit Recognizer
5,056,419
y_val_pred = xgbreg.predict(X_val_scalled)<compute_test_metric>
random_seed = 1
Digit Recognizer
5,056,419
r2_score(y_val, y_val_pred)<compute_test_metric>
from sklearn.model_selection import train_test_split
Digit Recognizer
5,056,419
mae = metrics.mean_absolute_error(y_val, y_val_pred) mse = metrics.mean_squared_error(y_val, y_val_pred) rmse = np.sqrt(metrics.mean_squared_error(y_val, y_val_pred)) print("Mean Absolute Error") print(mae) print() print("Mean Squared Error") print(mse) print() print("Root Mean Squared Error") print(rmse)<compute_test_metric>
from sklearn.model_selection import train_test_split
Digit Recognizer
5,056,419
evaluation = evaluation_df('Extreme Gradient Boosting', mae, mse, rmse, evaluation)<choose_model_class>
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed)
Digit Recognizer
5,056,419
<predict_on_test>
from keras.layers import LeakyReLU
Digit Recognizer
5,056,419
<choose_model_class>
model = Sequential() model.add(Conv2D(32, (3,3), padding='Same', activation='relu', input_shape=(28, 28, 1))) model.add(Dropout(0.25)) model.add(Conv2D(32, (7,7), activation='relu')) model.add(Conv2D(128, (5,5), activation='relu')) model.add(MaxPool2D((2,2))) model.add(Conv2D(64, (3,3), padding='Same', activation='relu')) model.add(Dropout(0.25)) model.add(Conv2D(64, (3,3), activation='relu')) model.add(Flatten()) model.add(Dense(128, activation='linear')) model.add(LeakyReLU(alpha=.001)) model.add(Dense(64, activation='linear')) model.add(LeakyReLU(alpha=.001)) model.add(Dense(64, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(324, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(32, activation='linear')) model.add(LeakyReLU(alpha=.001)) model.add(Dense(10, activation='softmax')) model.summary()
Digit Recognizer
5,056,419
%%time xgbreg = xgb.XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, importance_type='gain', interaction_constraints='', learning_rate=0.45, max_delta_step=0, max_depth=7, min_child_weight=15, monotone_constraints='()', n_estimators=120, n_jobs=16, num_parallel_tree=1, random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='exact', validate_parameters=1, verbosity=None) xgbreg.fit(X_train_scalled, y_train)<compute_test_metric>
optimizer = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0) model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
5,056,419
xgbreg.score(X_train_scalled, y_train)<predict_on_test>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=20, zoom_range=0.13, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(x_train)
Digit Recognizer
5,056,419
y_train_pred = xgbreg.predict(X_train_scalled)<compute_test_metric>
model.fit_generator(datagen.flow(x_train, y_train, batch_size=256), steps_per_epoch=len(x_train)//256, epochs=30)
Digit Recognizer
5,056,419
r2_score(y_train, y_train_pred)<predict_on_test>
score = model.evaluate(x_val, y_val, verbose=1) print('Test loss:', score[0]) print('Test accuracy:', score[1])
Digit Recognizer
5,056,419
y_val_pred = xgbreg.predict(X_val_scalled)<compute_test_metric>
result = model.predict(test) result = np.argmax(result, axis=1) result = pd.Series(result, name="Label")
Digit Recognizer
5,056,419
r2_score(y_val, y_val_pred)<compute_test_metric>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), result], axis=1) submission.to_csv("cnn_mnist.csv", index=False)
Digit Recognizer
7,836,969
mae = metrics.mean_absolute_error(y_val, y_val_pred) mse = metrics.mean_squared_error(y_val, y_val_pred) rmse = np.sqrt(metrics.mean_squared_error(y_val, y_val_pred)) print("Mean Absolute Error") print(mae) print() print("Mean Squared Error") print(mse) print() print("Root Mean Squared Error") print(rmse)<create_dataframe>
digit_recon_tran_csv = pd.read_csv('/kaggle/input/digit-recognizer/train.csv', dtype=np.float32) digit_recon_test_csv = pd.read_csv('/kaggle/input/digit-recognizer/test.csv', dtype=np.float32)
Digit Recognizer
7,836,969
evaluation = evaluation_df('Extreme Gradient Boosting Tuning 1', mae, mse, rmse, evaluation)<choose_model_class>
print('tran dataset size: ', digit_recon_tran_csv.size) print('test dataset size: ', digit_recon_test_csv.size)
Digit Recognizer
7,836,969
<predict_on_test>
tran_label = digit_recon_tran_csv.label.values tran_image = digit_recon_tran_csv.loc[:,digit_recon_tran_csv.columns != "label"].values/255 test_image = digit_recon_test_csv.values/255
Digit Recognizer
7,836,969
<choose_model_class>
train_image, valid_image, train_label, valid_label = train_test_split(tran_image, tran_label, test_size=0.2, random_state=42)
Digit Recognizer
7,836,969
%%time xgbreg = xgb.XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, importance_type='gain', interaction_constraints='', learning_rate=0.3, max_delta_step=0, max_depth=6, min_child_weight=1, monotone_constraints='()', n_estimators=3000, n_jobs=16, num_parallel_tree=1, random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='exact', validate_parameters=1, verbosity=None) xgbreg.fit(X_train_scalled, y_train)<predict_on_test>
print(torch.__version__) class MNIST_data(Dataset): def __init__(self, data, transform=transforms.Compose([transforms.ToPILImage(), transforms.RandomAffine(30, (0.1,0.1)), transforms.ToTensor()])): if len(data) == 1: self.X = data[0].reshape(-1,28,28) self.y = None else: self.X = data[0].reshape(-1,28,28) self.y = data[1].astype(np.int64) self.transform = transform def __len__(self): return len(self.X) def __getitem__(self, idx): if self.y is not None: return self.transform(self.X[idx]), self.y[idx] else: return self.transform(self.X[idx])
Digit Recognizer
7,836,969
y_train_pred = xgbreg.predict(X_train_scalled)<compute_test_metric>
batch_size = 64 train_dataset = MNIST_data((train_image, train_label)) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) valid_dataset = MNIST_data((valid_image, valid_label)) valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False)
Digit Recognizer
7,836,969
r2_score(y_train, y_train_pred)<predict_on_test>
class YANNet(nn.Module): def __init__(self): super(YANNet, self).__init__() self.conv = nn.Sequential( nn.Conv2d(1,8,3,1,1), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8,16,3,1,1), nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d(2), nn.Conv2d(16,16,3,1,1), nn.BatchNorm2d(16), nn.ReLU(), nn.Conv2d(16,8,3,1,1), nn.BatchNorm2d(8), nn.ReLU(), nn.MaxPool2d(2) ) self.fc = nn.Sequential( nn.Linear(8*7*7,256), nn.BatchNorm1d(256), nn.ReLU(), nn.Dropout(0.5), nn.Linear(256,256), nn.BatchNorm1d(256), nn.ReLU(), nn.Dropout(0.5), nn.Linear(256,10) ) def forward(self, img): x = self.conv(img) o = self.fc(x.view(x.shape[0], -1)) return o
Digit Recognizer
7,836,969
y_val_pred = xgbreg.predict(X_val_scalled)<compute_test_metric>
model = YANNet() error = nn.CrossEntropyLoss() if torch.cuda.is_available(): model = model.cuda() error = error.cuda() optimizer = torch.optim.SGD(model.parameters(), lr=0.1) scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
Digit Recognizer
7,836,969
r2_score(y_val, y_val_pred)<compute_test_metric>
num_epoc = 120 for epoch in range(num_epoc): epoc_train_loss = 0.0 epoc_train_corr = 0.0 epoc_valid_corr = 0.0 print('Epoch:{}/{}'.format(epoch, num_epoc)) model.train() scheduler.step() for batch_idx, (images, labels) in enumerate(train_loader): if torch.cuda.is_available(): images = images.cuda() labels = labels.cuda() images = Variable(images) labels = Variable(labels) outputs = model(images) optimizer.zero_grad() loss = error(outputs, labels) loss.backward() optimizer.step() epoc_train_loss += loss.data outputs = torch.max(outputs.data, 1)[1] epoc_train_corr += torch.sum(outputs == labels.data) with torch.no_grad(): model.eval() for batch_idx, (images, labels) in enumerate(valid_loader): if torch.cuda.is_available(): images = images.cuda() labels = labels.cuda() images = Variable(images) labels = Variable(labels) outputs = model(images) outputs = torch.max(outputs.data, 1)[1] epoc_valid_corr += torch.sum(outputs == labels.data) print("loss is :{:.4f}, Train Accuracy is:{:.4f}%, Valid Accuracy is:{:.4f}%".format(epoc_train_loss/len(train_dataset), 100*epoc_train_corr/len(train_dataset), 100*epoc_valid_corr/len(valid_dataset)))
Digit Recognizer
7,836,969
mae = metrics.mean_absolute_error(y_val, y_val_pred) mse = metrics.mean_squared_error(y_val, y_val_pred) rmse = np.sqrt(metrics.mean_squared_error(y_val, y_val_pred)) print("Mean Absolute Error") print(mae) print() print("Mean Squared Error") print(mse) print() print("Root Mean Squared Error") print(rmse)<compute_test_metric>
model = model.cpu() model.eval()
Digit Recognizer
7,836,969
evaluation = evaluation_df('Extreme Gradient Boosting with Tuning 2', mae, mse, rmse, evaluation)<count_missing_values>
digit_recon_submission_csv = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv', dtype=np.float32) print(digit_recon_submission_csv.head(10))
Digit Recognizer
7,836,969
df_store.isnull().sum() * 100 / df_store.shape[0]<categorify>
test_results = np.zeros((test_image.shape[0], 2))  # assumed (ImageId, Label) buffer; its allocation is not shown in the preceding cells for i in range(test_image.shape[0]): one_image = torch.from_numpy(test_image[i]).view(1,1,28,28) one_output = model(one_image) test_results[i,0] = i+1 test_results[i,1] = torch.max(one_output.data,1)[1].numpy()
Digit Recognizer
7,836,969
<feature_engineering><EOS>
Data = {'ImageId': test_results[:, 0], 'Label': test_results[:, 1]} DataFrame = pd.DataFrame(Data) DataFrame.to_csv('submission.csv', index=False, sep=',')
Digit Recognizer
4,223,111
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column>
print(os.listdir("../input")) seed = 4529 np.random.seed(seed)
Digit Recognizer
4,223,111
df_train.drop('CompetitionDistance', inplace=True, axis=1) df_train.head()<prepare_x_and_y>
base_dir = os.path.join("..", "input") train_df = pd.read_csv(os.path.join(base_dir, "train.csv")) test_df = pd.read_csv(os.path.join(base_dir, "test.csv")) len(train_df)
Digit Recognizer
4,223,111
X = df_train.drop(['Sales', 'Store'], axis = 1) y = df_train['Sales']<split>
%load_ext tensorboard.notebook %tensorboard --logdir logs
Digit Recognizer
4,223,111
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=53) X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=53)<choose_model_class>
x = train_df.drop(['label'], axis=1).values y = train_df['label'].values test_x = test_df.values
Digit Recognizer
4,223,111
%%time xgbreg = xgb.XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, importance_type='gain', interaction_constraints='', learning_rate=0.3, max_delta_step=0, max_depth=6, min_child_weight=1, monotone_constraints='()', n_estimators=3000, n_jobs=16, num_parallel_tree=1, random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='exact', validate_parameters=1, verbosity=None) xgbreg.fit(X_train, y_train)<predict_on_test>
x = x.reshape(-1, 28, 28, 1) x = x / 255.0 test_x = test_x.reshape(-1, 28, 28, 1) test_x = test_x / 255.0 y = to_categorical(y, num_classes=10) x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.1, random_state=seed)
Digit Recognizer
4,223,111
y_train_pred = xgbreg.predict(X_train)<compute_test_metric>
model = Sequential([ Conv2D(128, (3,3), activation="relu", input_shape=(28, 28, 1)), BatchNormalization(), Conv2D(128, (3,3), activation="relu"), BatchNormalization(), MaxPooling2D(2,2), Dropout(0.2), Conv2D(64, (3,3), activation="relu"), BatchNormalization(), Conv2D(64, (3,3), activation="relu"), BatchNormalization(), MaxPooling2D(2,2), Dropout(0.2), Flatten(), Dense(units=256, activation='relu'), Dropout(0.4), Dense(units=256, activation='relu'), Dropout(0.4), Dense(units=10, activation='softmax') ]) model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy', metrics=['acc']) model.summary()
Digit Recognizer
4,223,111
r2_score(y_train, y_train_pred)<predict_on_test>
batch_size = 128 epochs = 30 datagen = ImageDataGenerator(rotation_range=15, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.15, shear_range=0.15) datagen.fit(x_train) history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), validation_data=(x_val, y_val), steps_per_epoch = x_train.shape[0] // batch_size, epochs=epochs, callbacks=callbacks)
Digit Recognizer
4,223,111
<compute_test_metric><EOS>
pred = model.predict(test_x) pred = np.argmax(pred, axis=1) pred = pd.Series(pred, name="Label") test_df = pd.concat([pd.Series(range(1, 28001), name="ImageId"), pred], axis=1) test_df.to_csv('mnist-submission.csv', index=False)
Digit Recognizer
4,297,932
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<compute_test_metric>
%matplotlib inline np.random.seed(2) sns.set(style='white', context='notebook', palette='deep')
Digit Recognizer
4,297,932
mae = metrics.mean_absolute_error(y_val, y_val_pred) mse = metrics.mean_squared_error(y_val, y_val_pred) rmse = np.sqrt(metrics.mean_squared_error(y_val, y_val_pred)) print("Mean Absolute Error") print(mae) print() print("Mean Squared Error") print(mse) print() print("Root Mean Squared Error") print(rmse)<compute_test_metric>
train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv")
Digit Recognizer
4,297,932
evaluation = evaluation_df('Extreme Gradient Boosting with Change in Data', mae, mse, rmse, evaluation)<prepare_x_and_y>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
4,297,932
X = df_train.drop(['Sales', 'Year'], axis = 1) y = df_train['Sales']<split>
Y_train = to_categorical(Y_train, num_classes=10)
Digit Recognizer
4,297,932
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=53) X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=53)<choose_model_class>
random_seed = 2
Digit Recognizer
4,297,932
%%time xgbreg = xgb.XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, importance_type='gain', interaction_constraints='', learning_rate=0.3, max_delta_step=0, max_depth=5, min_child_weight=1, monotone_constraints='()', n_estimators=4500, n_jobs=16, num_parallel_tree=1, random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='exact', validate_parameters=1, verbosity=None) xgbreg.fit(X_train, y_train)<predict_on_test>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.05, random_state=random_seed)
Digit Recognizer