column        dtype   range
kernel_id     int64   24.2k – 23.3M (values)
prompt        string  8 – 1.85M (lengths)
completion    string  1 – 182k (lengths)
comp_name     string  5 – 57 (lengths)
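The rows that follow are flattened four-line records: kernel_id, prompt (the preceding code plus an operation tag such as <compute_test_metric>), completion (the next code cell), and comp_name. A minimal loading sketch in pandas, assuming the dump is stored as a Parquet file; the file name kernels.parquet is hypothetical:

    import pandas as pd

    # Columns: kernel_id (int64), prompt, completion, comp_name (strings).
    df = pd.read_parquet('kernels.parquet')  # hypothetical file name
    print(df.dtypes)
    print(df['comp_name'].value_counts().head())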
4,297,932
y_train_pred = xgbreg.predict(X_train)<compute_test_metric>
model = Sequential() model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax'))
Digit Recognizer
4,297,932
r2_score(y_train, y_train_pred)<predict_on_test>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
Digit Recognizer
4,297,932
y_val_pred = xgbreg.predict(X_val)<compute_test_metric>
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
4,297,932
r2_score(y_val, y_val_pred)<predict_on_test>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
4,297,932
y_test_pred = xgbreg.predict(X_test)<compute_test_metric>
epochs = 30 batch_size = 86
Digit Recognizer
4,297,932
r2_score(y_test, y_test_pred)<compute_test_metric>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
4,297,932
mae = metrics.mean_absolute_error(y_val, y_val_pred) mse = metrics.mean_squared_error(y_val, y_val_pred) rmse = np.sqrt(mse) print("Mean Absolute Error") print(mae) print() print("Mean Squared Error") print(mse) print() print("Root Mean Squared Error") print(rmse)<compute_test_metric>
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, Y_val), verbose=2, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction])
Digit Recognizer
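As corrected above, RMSE is the square root of the mean squared error, not of the mean absolute error. A one-line equivalent using the names from the row above:

    import numpy as np
    from sklearn import metrics

    # Same value as np.sqrt(mse) above; scikit-learn >= 1.4 also offers
    # metrics.root_mean_squared_error(y_val, y_val_pred).
    rmse = np.sqrt(metrics.mean_squared_error(y_val, y_val_pred))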
4,297,932
evaluation = evaluation_df('Extreme Gradient Boosting with Change in Data and tuning', mae, mse, rmse, evaluation)<load_from_csv>
results = model.predict(test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label")
Digit Recognizer
4,297,932
df_test = pd.read_csv('../input/rossmann-store-sales/test.csv') df_test.head()<count_missing_values>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1) submission.to_csv("submission.csv", index=False)
Digit Recognizer
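Rows sharing a kernel_id (4,297,932 above, 1,480,912 below) are consecutive cells of a single notebook, so grouping on that column reconstructs each kernel in order. A sketch, assuming the dataframe df from the loading example and the corrected column name completion:

    for kernel_id, cells in df.groupby('kernel_id', sort=False):
        source = '\n'.join(cells['completion'])  # one notebook's cells, in row order
        print(kernel_id, len(cells), 'cells')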
1,480,912
df_test.isnull().sum()<filter>
train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv") print(train.shape, test.shape)
Digit Recognizer
1,480,912
df_test[df_test['Open'].isnull()]<count_missing_values>
Y_train = train["label"] X_train = train.drop(labels=["label"], axis=1) del train
Digit Recognizer
1,480,912
df_test[df_test['Open'].isnull()]['Date'].value_counts()<count_values>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
1,480,912
df_test[df_test['Store'] == 622]['Open'].value_counts()<feature_engineering>
X_train = X_train.values.reshape(X_train.shape[0], 28, 28, 1) X_test = test.values.reshape(test.shape[0], 28, 28, 1) Y_train = to_categorical(Y_train, num_classes=10) Y_train
Digit Recognizer
1,480,912
df_test['Open'] = df_test['Open'].fillna(1) df_test.isnull().sum()<categorify>
from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization from keras.utils import np_utils
Digit Recognizer
1,480,912
def mapping(features): for feature in features: temp_dict = pd.Series(df_store[feature].values, index=df_store['Store']).to_dict() df_test[feature] = df_test['Store'].map(temp_dict)<categorify>
model = Sequential() dim = 28 nclasses = 10 model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(dim, dim, 1))) model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='same', activation='relu')) model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(120, activation='relu')) model.add(Dense(84, activation='relu')) model.add(Dense(nclasses, activation='softmax'))
Digit Recognizer
1,480,912
mapping(['StoreType', 'Assortment', 'Promo2', 'CompetitionDistance'])<feature_engineering>
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
1,480,912
df_test['CompetitionDistanceLog10'] = np.log10(df_test['CompetitionDistance']) df_test.head()<drop_column>
learning_rate_reduction = ReduceLROnPlateau(monitor='acc', patience=3, verbose=1, factor=0.5, min_lr=1e-7) datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train) history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=512), epochs=50, verbose=1, steps_per_epoch=X_train.shape[0] // 512, callbacks=[learning_rate_reduction])
Digit Recognizer
1,480,912
df_test.drop('CompetitionDistance', inplace=True, axis=1) df_test.head()<feature_engineering>
loss_and_metrics = model.evaluate(X_train, Y_train, batch_size=128) print(loss_and_metrics)
Digit Recognizer
1,480,912
df_test['DayOfYear'] = df_test['Date'].map(lambda x: datetime.datetime.strptime(str(x), '%Y-%m-%d').timetuple().tm_yday) df_test.head(10)<data_type_conversions>
pred = model.predict_classes(X_test) submission = pd.DataFrame({"ImageId": list(range(1, len(pred) + 1)), "Label": pred}) submission.to_csv("submission.csv", index=False, header=True)
Digit Recognizer
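The completion above calls Sequential.predict_classes, which was removed from Keras in TensorFlow 2.6 (the same call appears in later rows). On current versions the equivalent, under that assumption, is an argmax over predict:

    import numpy as np

    # Replacement for the removed model.predict_classes(X_test)
    pred = np.argmax(model.predict(X_test), axis=1)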
10,298,431
df_test['Date'] = pd.to_datetime(df_test['Date'], format='%Y-%m-%d')<feature_engineering>
d_train = pd.read_csv("../input/digit-recognizer/train.csv") d_test = pd.read_csv("../input/digit-recognizer/test.csv")
Digit Recognizer
10,298,431
df_test['Year'] = df_test['Date'].map(lambda x: x.year)<drop_column>
d_test.head()
Digit Recognizer
10,298,431
df_test.drop('Date', inplace=True, axis=1)<count_values>
x_train = d_train.iloc[:, 1:].to_numpy(dtype=np.float64) x_train = np.reshape(x_train, newshape=(len(x_train), 1, 28, 28)) y_train = d_train['label'].to_numpy() print(x_train.shape) print(y_train.shape) x_test = d_test.to_numpy(dtype=np.float64) x_test = np.reshape(x_test, newshape=(len(x_test), 1, 28, 28)) print(x_test.shape)
Digit Recognizer
10,298,431
df_test['StateHoliday'].value_counts()<count_values>
x_train = x_train / 255 x_test = x_test / 255 print(x_train[0]) print(x_test[0])
Digit Recognizer
10,298,431
df_test['StateHoliday'] = df_test['StateHoliday'].apply(lambda x: 1 if x == 'a' else (2 if x == 'b' else (3 if x == 'c' else x))) df_test['StateHoliday'].value_counts()<feature_engineering>
X_tr = torch.from_numpy(x_train).type(torch.FloatTensor) Y_tr = torch.from_numpy(y_train) X_te = torch.from_numpy(x_test).type(torch.FloatTensor) Y_te = torch.from_numpy(np.zeros(x_test.shape)) X_train, X_va, Y_train, Y_va = train_test_split(X_tr, Y_tr, test_size=0.2) train_set = torch.utils.data.TensorDataset(X_train, Y_train) val_set = torch.utils.data.TensorDataset(X_va, Y_va) test_set = torch.utils.data.TensorDataset(X_te, Y_te) trainloader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=False) valloader = torch.utils.data.DataLoader(val_set, batch_size=32, shuffle=False) testloader = torch.utils.data.DataLoader(test_set, batch_size=32, shuffle=False)
Digit Recognizer
10,298,431
df_test['StateHoliday'] = df_test['StateHoliday'].map(lambda x: 0 if x == '0' else x) type(df_test['StateHoliday'][0])<filter>
class NMIST_CNN(nn.Module): def __init__(self): super(NMIST_CNN, self).__init__() self.cn1 = nn.Conv2d(1, 16, kernel_size=4, padding=2) nn.init.xavier_normal_(self.cn1.weight) nn.init.zeros_(self.cn1.bias) self.bn1 = nn.BatchNorm2d(num_features=16) self.cn2 = nn.Conv2d(16, 32, kernel_size=4) nn.init.xavier_normal_(self.cn2.weight) nn.init.zeros_(self.cn2.bias) self.bn2 = nn.BatchNorm2d(num_features=32) self.fc1 = nn.Linear(800, 200) nn.init.xavier_normal_(self.fc1.weight) nn.init.zeros_(self.fc1.bias) self.bn3 = nn.BatchNorm1d(num_features=200) self.fc2 = nn.Linear(200, 10) self.bn4 = nn.BatchNorm1d(num_features=10) self.dropout = nn.Dropout(p=0.2) self.pooling = nn.MaxPool2d((2, 2)) def forward(self, x): x = F.relu(self.bn1(self.cn1(x))) x = self.pooling(x) x = self.dropout(x) x = F.relu(self.bn2(self.cn2(x))) x = self.pooling(x) x = self.dropout(x) x = x.view(-1, 800) x = F.relu(self.bn3(self.fc1(x))) x = self.dropout(x) x = F.log_softmax(self.bn4(self.fc2(x)), dim=1) return x
Digit Recognizer
10,298,431
df_test_open = df_test[df_test['Open'] == 1] df_test_closed = df_test[df_test['Open'] == 0]<feature_engineering>
model = NMIST_CNN() print(model)
Digit Recognizer
10,298,431
df_test_closed['Sales'] = 0<drop_column>
criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.003) epochs = 30 train_losses, test_losses, train_accuracies, test_accuracies = [], [], [], [] for e in range(epochs): tr_loss = 0 tr_accuracy = 0 model.train() for images, labels in trainloader: optimizer.zero_grad() results = model(images) loss = criterion(results, labels) loss.backward() optimizer.step() tr_loss += loss.item() top_p, top_class = results.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) tr_accuracy += torch.mean(equals.type(torch.FloatTensor)) else: te_loss = 0 te_accuracy = 0 with torch.no_grad(): model.eval() for images, labels in valloader: test_results = model(images) loss2 = criterion(test_results, labels) te_loss += loss2.item() top_p, top_class = test_results.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) te_accuracy += torch.mean(equals.type(torch.FloatTensor)) train_losses.append(tr_loss/len(trainloader)) train_accuracies.append(tr_accuracy.item()/len(trainloader)) test_losses.append(te_loss/len(valloader)) test_accuracies.append(te_accuracy.item()/len(valloader)) print("Epoch: " + str(e+1)) print("Train loss: " + str(tr_loss/len(trainloader))) print(f'Train accuracy: {tr_accuracy.item()*100/len(trainloader)}%') print("Validation loss: " + str(te_loss/len(valloader))) print(f'Validation accuracy: {te_accuracy.item()*100/len(valloader)}%') print('')
Digit Recognizer
10,298,431
df_test_open.drop('Open', inplace=True, axis=1) df_test_closed.drop('Open', inplace=True, axis=1)<drop_column>
prediction = [] with torch.no_grad(): model.eval() for images, x in testloader: predictions = model(images) top_p, top_class = predictions.topk(1, dim=1) for n in range(len(predictions)): prediction.append(top_class[n].item()) submit = pd.DataFrame(data={'Label': prediction}) submit.insert(0, 'ImageId', range(1, 1 + len(submit)), True) submit.head()
Digit Recognizer
10,298,431
<drop_column><EOS>
submit.to_csv('submission.csv', index=False) submit.head()
Digit Recognizer
6,166,956
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column>
import keras from keras.datasets import mnist from sklearn.model_selection import train_test_split import numpy as np import pandas as pd
Digit Recognizer
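Prompts end in an operation tag such as <drop_column>, and kernel boundaries are marked with <SOS>/<EOS> as in the row above. A sketch for splitting a prompt into code and tag, assuming the lowercase tag always sits at the end of the string:

    import re

    TAG_RE = re.compile(r'<([a-z_]+)>\s*$')  # trailing operation tag, e.g. <drop_column>

    def split_prompt(prompt):
        match = TAG_RE.search(prompt)
        if match is None:  # e.g. prompts ending in <EOS>
            return prompt, None
        return prompt[:match.start()].rstrip(), match.group(1)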
6,166,956
cols = ['Store', 'DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment', 'DayOfYear', 'Promo2', 'CompetitionDistanceLog10'] X = X[cols]<predict_on_test>
batch_size = 128 num_classes = 10 epochs = 30
Digit Recognizer
6,166,956
X_pred = xgbreg.predict(X)<prepare_output>
train = pd.read_csv("../input/digit-recognizer/train.csv") test = pd.read_csv("../input/digit-recognizer/test.csv")
Digit Recognizer
6,166,956
df_test_open['Sales'] = X_pred<concatenate>
y_train = train["label"] x_train = train.drop(labels=["label"], axis=1)
Digit Recognizer
6,166,956
df_test_final = pd.concat([df_test_open, df_test_closed])<load_from_csv>
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, random_state=42, test_size=0.3)
Digit Recognizer
6,166,956
sample_sub = pd.read_csv('../input/rossmann-store-sales/sample_submission.csv') sample_sub.head()<categorify>
from keras import backend
Digit Recognizer
6,166,956
temp_dict = pd.Series(df_test_final['Sales'].values, index=df_test_final['Id']).to_dict() sample_sub['Sales'] = sample_sub['Id'].map(temp_dict)<save_to_csv>
x_train = x_train.astype('float') x_test = x_test.astype('float') test = test.astype('float') x_train /= 255 x_test /= 255 test /= 255
Digit Recognizer
6,166,956
sample_sub.to_csv('sample_submission.csv', header=['Id', 'Sales'], index=False)<load_from_csv>
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes)
Digit Recognizer
6,166,956
data_path = '/kaggle/input/porto-seguro-safe-driver-prediction/' train = pd.read_csv(data_path + 'train.csv', index_col='id') test = pd.read_csv(data_path + 'test.csv', index_col='id') submission = pd.read_csv(data_path + 'sample_submission.csv', index_col='id')<concatenate>
model = Sequential()
Digit Recognizer
6,166,956
all_data = pd.concat([train, test], ignore_index=True) all_data = all_data.drop('target', axis=1) all_features = all_data.columns.tolist()<feature_engineering>
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
Digit Recognizer
6,166,956
all_data['num_missing'] = (all_data == -1).sum(axis=1) remaining_features = [col for col in all_features if ('cat' not in col and 'calc' not in col)] remaining_features.append('num_missing')<categorify>
model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape)) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax'))
Digit Recognizer
6,166,956
cat_features = [col for col in all_features if 'cat' in col] onehot_encoder = OneHotEncoder() encoded_cat_matrix = onehot_encoder.fit_transform(all_data[cat_features])<data_type_conversions>
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0), metrics=['accuracy'])
Digit Recognizer
6,166,956
ind_features = [col for col in all_features if 'ind' in col] first_col = True for col in ind_features: if first_col: all_data['mix_ind'] = all_data[col].astype(str) + '_' first_col = False else: all_data['mix_ind'] += all_data[col].astype(str) + '_'<count_values>
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
Digit Recognizer
6,166,956
cat_count_features = [] for col in cat_features + ['mix_ind']: val_counts_dic = all_data[col].value_counts().to_dict() all_data[f'{col}_count'] = all_data[col].apply(lambda x: val_counts_dic[x]) cat_count_features.append(f'{col}_count')<drop_column>
from keras.preprocessing.image import ImageDataGenerator
Digit Recognizer
6,166,956
drop_features = ['ps_ind_14', 'ps_ind_10_bin', 'ps_ind_11_bin', 'ps_ind_12_bin', 'ps_ind_13_bin', 'ps_car_14'] all_data_remaining = all_data[remaining_features + cat_count_features].drop(drop_features, axis=1) all_data_sprs = sparse.hstack([sparse.csr_matrix(all_data_remaining), encoded_cat_matrix], format='csr')<prepare_x_and_y>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.3, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(x_train)
Digit Recognizer
6,166,956
num_train = train.shape[0] X = all_data_sprs[:num_train] X_test = all_data_sprs[num_train:] y = train['target'].values<compute_train_metric>
annealer_callback = ReduceLROnPlateau(monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
6,166,956
def eval_gini(y_true, y_pred): assert y_true.shape == y_pred.shape n_samples = y_true.shape[0] L_mid = np.linspace(1 / n_samples, 1, n_samples) pred_order = y_true[y_pred.argsort()] L_pred = np.cumsum(pred_order) / np.sum(pred_order) G_pred = np.sum(L_mid - L_pred) true_order = y_true[y_true.argsort()] L_true = np.cumsum(true_order) / np.sum(true_order) G_true = np.sum(L_mid - L_true) return G_pred / G_true<compute_test_metric>
model.fit_generator(generator=datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=epochs, validation_data=(x_test, y_test), verbose=2, callbacks=[annealer_callback])
Digit Recognizer
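eval_gini above is a normalized Gini coefficient: the Lorenz-curve gap for the model's ranking divided by the gap for a perfect ranking, so a perfect ranking scores 1.0. A toy usage sketch:

    import numpy as np

    y_true = np.array([0., 0., 1., 1.])
    print(eval_gini(y_true, y_true))                           # perfect ranking -> 1.0
    print(eval_gini(y_true, np.array([0.1, 0.4, 0.35, 0.8])))  # imperfect -> below 1.0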
6,166,956
def gini_lgb(preds, dtrain): labels = dtrain.get_label() return 'gini', eval_gini(labels, preds), True<compute_test_metric>
score = model.evaluate(x_test, y_test, verbose=0) score
Digit Recognizer
6,166,956
def gini_xgb(preds, dtrain): labels = dtrain.get_label() return 'gini', eval_gini(labels, preds)<choose_model_class>
results = model.predict(test) results = np.argmax(results, axis=1)
Digit Recognizer
6,166,956
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1991)<init_hyperparams>
submission_df = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
Digit Recognizer
6,166,956
max_params_lgb = {'bagging_fraction': 0.8043696643500143, 'feature_fraction': 0.6829323879981047, 'lambda_l1': 0.9264555612104627, 'lambda_l2': 0.9774233689434216, 'min_child_samples': 10, 'min_child_weight': 125.68433948868649, 'num_leaves': 28, 'objective': 'binary', 'learning_rate': 0.01, 'bagging_freq': 1, 'verbosity': 0, 'random_state': 1991}<split>
submission_df["Label"] = results
Digit Recognizer
6,166,956
<init_hyperparams><EOS>
submission_df.to_csv("submission.csv", index=False)
Digit Recognizer
5,052,155
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<split>
%matplotlib inline sns.set(style='white', context='notebook', palette='deep') warnings.filterwarnings('ignore') train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') print(train.shape) Y_train = train['label'] X_train = train.drop(labels=["label"], axis=1) fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111) sns.countplot(Y_train); ax.set_title(f'Labelled Digits Count ({Y_train.size} total)', size=20); ax.set_xlabel('Digits'); for patch in ax.patches: ax.annotate('{:}'.format(patch.get_height()), (patch.get_x() + 0.1, patch.get_height() - 150), color='w')
Digit Recognizer
5,052,155
oof_val_preds_xgb = np.zeros(X.shape[0]) oof_test_preds_xgb = np.zeros(X_test.shape[0]) for idx, (train_idx, valid_idx) in enumerate(folds.split(X, y)): X_train, y_train = X[train_idx], y[train_idx] X_valid, y_valid = X[valid_idx], y[valid_idx] dtrain = xgb.DMatrix(X_train, y_train) dvalid = xgb.DMatrix(X_valid, y_valid) dtest = xgb.DMatrix(X_test) watchlist = [(dtrain, 'train'), (dvalid, 'valid')] xgb_model = xgb.train(params=max_params_xgb, dtrain=dtrain, num_boost_round=1000, evals=watchlist, maximize=True, feval=gini_xgb, early_stopping_rounds=150, verbose_eval=100) best_iter = xgb_model.best_iteration oof_test_preds_xgb += xgb_model.predict(dtest, ntree_limit=best_iter) / folds.n_splits oof_val_preds_xgb[valid_idx] += xgb_model.predict(dvalid, ntree_limit=best_iter) gini_score = eval_gini(y_valid, oof_val_preds_xgb[valid_idx]) print(f'Fold {idx+1} gini score: {gini_score}')<compute_test_metric>
def check_missing_data(df): flag = df.isna().sum().any() if flag == True: total = df.isnull().sum() percent = df.isnull().sum() / df.isnull().count() * 100 output = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) data_type = [] for col in df.columns: dtype = str(df[col].dtype) data_type.append(dtype) output['Types'] = data_type return np.transpose(output) else: return False check_missing_data(train) check_missing_data(test)
Digit Recognizer
5,052,155
print('LightGBM OOF Gini Score:', eval_gini(y, oof_val_preds_lgb))<compute_test_metric>
X_train = X_train / 255.0 test = test / 255.0 mean_px = X_train.mean().astype(np.float32) std_px = X_train.std().astype(np.float32) def standardize(x): return (x - mean_px) / std_px X_train = X_train.values.reshape(-1, 28, 28, 1) test = test.values.reshape(-1, 28, 28, 1) for i in range(6, 9): plt.subplot(330 + (i + 1)) plt.imshow(X_train[i, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(f'Label: {Y_train[i]}'); Y_train = to_categorical(Y_train, num_classes=10) train_x, test_x, train_y, test_y = train_test_split(X_train, Y_train, test_size=0.1)
Digit Recognizer
5,052,155
print('XGBoost OOF Gini Score:', eval_gini(y, oof_val_preds_xgb))<save_to_csv>
validation_split = 0.15 img_gen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False, validation_split=validation_split) batch_size = 86 train_gen = img_gen.flow(x=train_x, y=train_y, subset='training', batch_size=batch_size) valid_gen = img_gen.flow(x=train_x, y=train_y, subset='validation', batch_size=batch_size) visualization_flow = img_gen.flow(train_x, train_y, batch_size=1, shuffle=False) fig, axs = plt.subplots(2, 4, figsize=(20, 10)) fig.suptitle('Augmentation Results', size=32) for axs_col in range(axs.shape[1]): idx = np.random.randint(0, train_x.shape[0]) img = train_x[idx, :, :, 0] aug_img, aug_label = visualization_flow[idx] axs[0, axs_col].imshow(img, cmap='gray'); axs[0, axs_col].set_title('example') axs[1, axs_col].imshow(aug_img[0, :, :, 0], cmap='gray'); axs[1, axs_col].set_title('Augmented example')
Digit Recognizer
5,052,155
oof_test_preds = oof_test_preds_lgb * 0.6 + oof_test_preds_xgb * 0.4 submission['target'] = oof_test_preds submission.to_csv('submission.csv')<import_modules>
seed = 43 np.random.seed(seed) learning_rate=0.00065 optimizer = Adam(lr=learning_rate) def conv_block(x, filters, kernel_size, strides, layer_no, use_pool=False, padding='same'): x = Convolution2D(filters=filters, kernel_size=kernel_size, strides=strides, name=f'conv{layer_no}', padding=padding )(x) x = BatchNormalization(name=f'bn{layer_no}' )(x) x = Activation('relu', name=f'activation{layer_no}' )(x) if use_pool: x = MaxPooling2D(pool_size=[2, 2], strides=[2, 2], name=f'pool{layer_no}', padding='same' )(x) return x def build_model(X): h, w, c = X.shape[1:] X = Input(shape=(h, w, c)) conv1 = conv_block(X, filters=8, kernel_size=[3, 3], strides=[1, 1], layer_no=1) conv2 = conv_block(conv1, filters=16, kernel_size=[2, 2], strides=[1, 1], layer_no=2) conv3 = conv_block(conv2, filters=32, kernel_size=[2, 2], strides=[1, 1], layer_no=3, use_pool=True) conv4 = conv_block(conv3, filters=64, kernel_size=[3, 3], strides=[2, 2], layer_no=4) conv5 = conv_block(conv4, filters=128, kernel_size=[2, 2], strides=[1, 1], layer_no=5) conv6 = conv_block(conv5, filters=256, kernel_size=[2, 2], strides=[1, 1], layer_no=6, use_pool=True) flat1 = Flatten(name='flatten1' )(conv6) drop1 = Dropout(0.35, name='Dopout1' )(flat1) dens1 = Dense(128, name='dense1' )(drop1) bn7 = BatchNormalization(name='bn7' )(dens1) drop2 = Dropout(0.35, name='Dopout2' )(bn7) relu1 = Activation('relu', name='activation7' )(drop2) dens1 = Dense(256, name='dense01' )(relu1) bn7 = BatchNormalization(name='bn07' )(dens1) drop2 = Dropout(0.5, name='Dopout02' )(bn7) relu1 = Activation('relu', name='activation07' )(drop2) dens2 = Dense(10, name='dense2' )(relu1) bn8 = BatchNormalization(name='bn8' )(dens2) output_layer = Activation('softmax', name='softmax' )(bn8) model = Model(inputs=X, outputs=output_layer) model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy']) return model def build_seq_model(X): h, w, c = X.shape[1:] model = Sequential([ Convolution2D(32,(3,3), activation='relu', input_shape =(h, w, c)) , BatchNormalization(axis=1), Convolution2D(32,(3,3), activation='relu'), MaxPooling2D() , BatchNormalization(axis=1), Convolution2D(64,(3,3), activation='relu'), BatchNormalization(axis=1), Convolution2D(64,(3,3), activation='relu'), MaxPooling2D() , Dropout(0.25), Flatten() , Dense(512, activation='relu'), Dropout(0.25), Dense(1024, activation='relu'), Dropout(0.5), Dense(10, activation='softmax') ]) model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy']) return model model = build_model(train_x) learning_rate_annealer = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) epochs = 10 hist = model.fit_generator(generator=train_gen, steps_per_epoch=train_x.shape[0]*(1-validation_split)//batch_size, epochs=epochs, validation_data=valid_gen, validation_steps=train_x.shape[0]*validation_split//batch_size, callbacks=[learning_rate_annealer]) fig, ax = plt.subplots(2,1) ax[0].plot(hist.history['loss'], color='b', label="Training loss") ax[0].plot(hist.history['val_loss'], color='r', label="validation loss",axes =ax[0]) legend = ax[0].legend(loc='best', shadow=True) ax[1].plot(hist.history['acc'], color='b', label="Training accuracy") ax[1].plot(hist.history['val_acc'], color='r',label="Validation accuracy") legend = ax[1].legend(loc='best', shadow=True )
Digit Recognizer
5,052,155
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from numpy import nan<load_from_csv>
def calculate_performance(labels, pred, dataset): pred_cat = np.argmax(pred, axis=1) labels_cat = np.argmax(labels, axis=1) corrects =(pred_cat == labels_cat) falses = dataset[~corrects] falses_labels = labels_cat[~corrects] falses_preds = pred[~corrects] examples_num = labels.shape[0] accuracy = np.count_nonzero(corrects)/ examples_num return accuracy, [falses, falses_labels, falses_preds], [labels_cat, pred_cat] test_y_pred = model.predict(test_x) test_accuracy,(test_falses, test_falses_labels, test_falses_preds),(true_labels, pred_labels)= calculate_performance(test_y, test_y_pred, test_x) test_f1 = f1_score(y_pred=pred_labels, y_true=true_labels, average='micro') print(f'Test Dataset Accuracy: {np.round(test_accuracy*100, 3)}%') print(f'F1 Score = {test_f1}') plt.figure(figsize=(10, 10)) test_matrix = confusion_matrix(y_pred=pred_labels, y_true=true_labels) sns.heatmap(data=test_matrix, annot=True, cmap='Blues', fmt=f'.0f') plt.title('Confusion Matrix - Test Dataset', size=24) plt.xlabel('Predictions', size=20); plt.ylabel('Labels', size=20); final_loss, final_acc = model.evaluate(test_x, test_y, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
Digit Recognizer
5,052,155
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')<prepare_x_and_y>
styles=[':','-.','--','-',':','-.','--','-',':','-.','--','-'] nets = 3 model = [0] *nets for j in range(3): model[j] = Sequential() model[j].add(Conv2D(j*16+16,kernel_size=5,activation='relu',input_shape=(28,28,1))) model[j].add(MaxPool2D()) model[j].add(Conv2D(j*32+32,kernel_size=5,activation='relu')) model[j].add(MaxPool2D()) model[j].add(Flatten()) model[j].add(Dense(256, activation='relu')) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size = 0.333) history = [0] * nets names = ["16 maps","32 maps","64 maps"] epochs = 10 for j in range(nets): history[j] = model[j].fit(X_train2,Y_train2, batch_size=80, epochs = epochs, validation_data =(X_val2,Y_val2), verbose=0) print("CNN {0}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format( names[j],epochs,max(history[j].history['acc']),max(history[j].history['val_acc']))) plt.figure(figsize=(15,5)) for i in range(nets): plt.plot(history[i].history['val_acc'],linestyle=styles[i]) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(names, loc='upper left') axes = plt.gca() axes.set_ylim([0.98,1]) plt.show()
Digit Recognizer
5,052,155
y_train = train['SalePrice'] train.drop(['SalePrice'], axis=1, inplace=True) features = pd.concat([train, test]) features.shape<drop_column>
model = Sequential() model.add(Conv2D(32,kernel_size=3,activation='relu',input_shape=(28,28,1))) model.add(BatchNormalization()) model.add(Conv2D(32,kernel_size=3,activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(64,kernel_size=3,activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64,kernel_size=3,activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Dense(10, activation='softmax')) model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 **(x+epochs)) hist = model.fit_generator(img_gen.flow(X_train,Y_train, batch_size=64), epochs = 100, steps_per_epoch = X_train.shape[0]//64, callbacks=[annealer], verbose=0) print("Train accuracy={0:.5f}, Train loss={1:.5f}".format(max(hist.history['acc']),max(hist.history['loss'])) )
Digit Recognizer
5,052,155
<data_type_conversions><EOS>
predictions = model.predict(test) predictions = np.argmax(predictions, axis=1) submissions = pd.DataFrame({"ImageId": list(range(1, len(predictions) + 1)), "Label": predictions}) submissions.to_csv("submission.csv", index=False, header=True) print(submissions.head(5))
Digit Recognizer
4,439,703
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify>
%matplotlib inline
Digit Recognizer
4,439,703
for column in features.columns: if features[column].dtype == object: df_dummies = pd.get_dummies(features[column], drop_first=True, prefix=column) features.drop([column], axis=1, inplace=True) features = pd.concat([features, df_dummies], axis=1) features.head()<split>
training_data = pd.read_csv('../input/train.csv') testing_data = pd.read_csv('../input/test.csv')
Digit Recognizer
4,439,703
x_train = features.iloc[:len(y_train), :] x_test = features.iloc[len(x_train):, :]<normalization>
label = training_data.label.value_counts() label
Digit Recognizer
4,439,703
def bi_directional_elimination(x, y): Sig_Level = 0.05; features = x.columns.tolist() selected_features = [] while(len(features)> 0): remain_features = list(set(features)- set(selected_features)) pval = pd.Series(index=remain_features, dtype='float64') for column in remain_features : model = sm.OLS(y, sm.add_constant(x[selected_features + [column]])).fit() pval[column] = model.pvalues[column] min_pval = pval.min() if(min_pval < Sig_Level): selected_features.append(pval.idxmin()) while(len(selected_features)> 0): selected_features_with_constant = sm.add_constant(x[selected_features]) p_values = sm.OLS(y, selected_features_with_constant ).fit().pvalues[1:] max_pval = p_values.max() if(max_pval >= Sig_Level): removed_feature = p_values.idxmax() selected_features.remove(removed_feature) else : break else : break return selected_features<compute_test_metric>
train_data = training_data.drop(['label'], axis=1).values.astype('float32') target = training_data['label'].values.astype('int32') test_data = testing_data.values.astype('float32') train_data = train_data.reshape(train_data.shape[0], 28, 28) / 255.0 test_data = test_data.reshape(test_data.shape[0], 28, 28) / 255.0
Digit Recognizer
4,439,703
selected_features = bi_directional_elimination(x_train, y_train)<create_dataframe>
num = 10 target = keras.utils.to_categorical(target, num)
Digit Recognizer
4,439,703
x_train = x_train[selected_features].copy() x_test = x_test[selected_features].copy()<init_hyperparams>
num_classes = 10 input_shape = (28, 28, 1)
Digit Recognizer
4,439,703
regressor = xgboost.XGBRegressor() booster = ['gbtree', 'gblinear'] base_score = [0.25, 0.5, 0.75, 1] n_estimators = [100, 500, 900, 1100, 1500] max_depth = [2, 3, 5, 10, 15] learning_rate = [0.05, 0.1, 0.15, 0.20] min_child_weight = [1, 2, 3, 4] hyperparameter_grid = {'n_estimators': n_estimators, 'max_depth': max_depth, 'learning_rate': learning_rate, 'min_child_weight': min_child_weight, 'booster': booster, 'base_score': base_score} random_cv = RandomizedSearchCV(estimator=regressor, param_distributions=hyperparameter_grid, cv=5, n_iter=50, scoring='neg_mean_absolute_error', n_jobs=4, verbose=5, return_train_score=True, random_state=42) random_cv.fit(x_train, y_train)<find_best_params>
X_train, X_val, Y_train, Y_val = train_test_split(train_data, target, test_size=0.1, random_state=42)
Digit Recognizer
4,439,703
random_cv.best_estimator_<choose_model_class>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
4,439,703
regressor = xgboost.XGBRegressor(base_score=0.25, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, gamma=0, gpu_id=-1, importance_type='gain', interaction_constraints='', learning_rate=0.1, max_delta_step=0, max_depth=2, min_child_weight=1, missing=nan, monotone_constraints='()', n_estimators=900, n_jobs=8, num_parallel_tree=1, random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1, tree_method='exact', validate_parameters=1, verbosity=None)<train_model>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.0001)
Digit Recognizer
4,439,703
regressor.fit(x_train, y_train)<predict_on_test>
model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal', input_shape=input_shape)) model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal')) model.add(MaxPool2D((2, 2))) model.add(BatchNormalization()) model.add(Dropout(0.20)) model.add(Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')) model.add(Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(num_classes, activation='softmax'))
Digit Recognizer
4,439,703
y_pred = regressor.predict(x_test)<save_to_csv>
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
Digit Recognizer
4,439,703
df_pred = pd.DataFrame(y_pred) df_sub = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv') df_sub = pd.concat([df_sub['Id'], df_pred], axis=1) df_sub.columns = ['Id', 'SalePrice'] df_sub.to_csv('sample_submission.csv', index=False)<import_modules>
fitting = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=90), epochs=50, validation_data=(X_val, Y_val), verbose=1, callbacks=[learning_rate_reduction])
Digit Recognizer
4,439,703
import numpy as np import pandas as pd import seaborn as sns import sklearn from sklearn import metrics import tensorflow as tf from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split, StratifiedKFold<load_from_csv>
predicted_classes = model.predict_classes(test_data)
Digit Recognizer
4,439,703
<compute_train_metric><EOS>
submissions = pd.DataFrame({"ImageId": list(range(1, len(predicted_classes) + 1)), "Label": predicted_classes}) submissions.to_csv("submission.csv", index=False, header=True)
Digit Recognizer
2,285,148
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<compute_test_metric>
print(os.listdir("../input")) sns.set(style='white', context='notebook', palette='deep')
Digit Recognizer
2,285,148
def rmlse(y_true, y_pred): return tf.sqrt(tf.reduce_mean(tf.square(tf.math.log(y_pred + 1) - tf.math.log(y_true + 1))))<save_to_csv>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')
Digit Recognizer
2,285,148
def submit(model, X, ids, file_path): SalePrice = model.predict(X) submission = pd.DataFrame({"Id": ids, "SalePrice": SalePrice.reshape(-1)}) submission.to_csv(file_path, index=False)<sort_values>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')
Digit Recognizer
2,285,148
train.corr()["SalePrice"].sort_values(key=lambda x: abs(x), ascending=False)<feature_engineering>
X_train.head()
Digit Recognizer
2,285,148
for data in [train, test]: null_counts = data.isnull().sum() null_counts[null_counts > 0] null_columns = list(pd.DataFrame(null_counts[null_counts > 0]).index) for column in null_columns: if data[column].dtype == object: data[column] = data[[column]].replace(np.nan, "Unknown") else: data[column] = data[column].replace(np.nan, data[column].mean())<categorify>
X_train.isnull().any().sum()
Digit Recognizer
2,285,148
train_test = pd.get_dummies(pd.concat([train, test]))<drop_column>
test.isnull().any().sum()
Digit Recognizer
2,285,148
train_features = train_test.iloc[0:len(train)] test_features = train_test.iloc[len(train):] _ = train_features.pop("Id") _ = test_features.pop("SalePrice") test_ids = test_features.pop("Id")<split>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
2,285,148
train_features, val_features = train_test_split(train_features, test_size=0.2, random_state=np.random.randint(1000))<sort_values>
X_train = X_train.values.reshape(-1, 28, 28, 1) test = test.values.reshape(-1, 28, 28, 1)
Digit Recognizer
2,285,148
threshold = 0.05 correlated_scores = train_features.corr()["SalePrice"] correlated_scores = correlated_scores[correlated_scores.abs() >= threshold] correlated_columns = list(correlated_scores.index) correlated_columns.remove("SalePrice") print(correlated_columns)<drop_column>
y_train = to_categorical(y_train, num_classes=10)
Digit Recognizer
2,285,148
train_targets = train_features.pop("SalePrice") val_targets = val_features.pop("SalePrice")<define_variables>
random_seed = 2
Digit Recognizer
2,285,148
categorical_columns = set(train.dtypes[train.dtypes == object].index)<feature_engineering>
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=random_seed)
Digit Recognizer
2,285,148
scale_strategies = ["none", "standard_scale", "standard_scale_exclude_categorical_features"] scale_strategy = scale_strategies[2] if scale_strategy == scale_strategies[1]: train_features = (train_features - mean_value) / std_value val_features = (val_features - mean_value) / std_value test_features = (test_features - mean_value) / std_value if scale_strategy == scale_strategies[2]: for column in train_features.columns: is_categorical_feature = False components = column.split("_") if len(components) == 2 and components[0] in categorical_columns: is_categorical_feature = True if is_categorical_feature == False: for features in [train_features, val_features, test_features]: features.loc[:, column] = (features.loc[:, column] - mean_value[column]) / std_value[column]<drop_column>
model = Sequential() model.add(Conv2D(filters=40, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(Conv2D(filters=40, kernel_size=(5, 5), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Conv2D(filters=60, kernel_size=(3, 3), padding='same', activation='relu')) model.add(Conv2D(filters=60, kernel_size=(3, 3), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(300, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax'))
Digit Recognizer
2,285,148
use_correlated_columns = True if use_correlated_columns: train_features = train_features[correlated_columns] val_features = val_features[correlated_columns] test_features = test_features[correlated_columns]<train_model>
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
Digit Recognizer
2,285,148
begin = time.time() parameters = { "depth": [4, 5, 6, 7, 8, 9], "learning_rate": [0.01, 0.05, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, 0.13, 0.14, 0.15], "iterations": [500, 10000], } def train_with_catboost(hyperparameters, X_train, X_val, y_train, y_val): keys = hyperparameters.keys() best_index = {key:0 for key in keys} best_cat = None best_score = 10e8 for(index, key)in enumerate(keys): print("Find best parameter for %s" %(key)) items = hyperparameters[key] best_parameter = None temp_best = 10e8 for(key_index, item)in enumerate(items): iterations = hyperparameters["iterations"][best_index["iterations"]] if key != "iterations" else item learning_rate = hyperparameters["learning_rate"][best_index["learning_rate"]] if key != "learning_rate" else item depth = hyperparameters["depth"][best_index["depth"]] if key != "depth" else item print("Training with iterations: %d learning_rate: %.2f depth:%d"%(iterations, learning_rate, depth)) cat = catboost.CatBoostRegressor( iterations = iterations, learning_rate = learning_rate, depth = depth, verbose=500 ) cat.fit(X_train, y_train, verbose=False) result = evaluate(cat, X_val, y_val) score = result["rmlse"] if score < temp_best: temp_best = score best_index[key] = key_index best_parameter = item if score < best_score: best_score = score best_cat = cat print("Best Parameter for %s: "%(key), best_parameter) best_parameters = { "iterations": hyperparameters["iterations"][best_index["iterations"]], "learning_rate": hyperparameters["learning_rate"][best_index["learning_rate"]], "depth": hyperparameters["depth"][best_index["depth"]] } return best_cat, best_score, best_parameters best_cat, best_score, best_parameters = train_with_catboost(parameters, train_features, val_features, train_targets, val_targets) print("Best RMLSE: ", best_score) print("Best Parameters: ", best_parameters) elapsed = time.time() - begin print("Elapsed time: ", elapsed) submit(best_cat, test_features, test_ids, "submission_cat.csv" )<train_model>
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
Digit Recognizer
2,285,148
X = pd.concat([train_features, val_features]) y = pd.concat([train_targets, val_targets]) fold = 1 models = [] for train_indices, valid_indices in KFold(n_splits=5, shuffle=True).split(X): print("Training with Fold %d" % (fold)) X_train = X.iloc[train_indices] X_val = X.iloc[valid_indices] y_train = y.iloc[train_indices] y_val = y.iloc[valid_indices] cat = catboost.CatBoostRegressor(iterations=best_parameters["iterations"], learning_rate=best_parameters["learning_rate"], depth=best_parameters["depth"]) cat.fit(X_train, y_train, verbose=False) models.append(cat) evaluate(cat, X_val, y_val) submit(cat, test_features, test_ids, "submission_cat_fold%d.csv" % (fold)) fold += 1<save_to_csv>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=5, factor=0.5, verbose=1, min_lr=0.00001)
Digit Recognizer
2,285,148
SalePrice = np.mean([model.predict(test_features) for model in models], axis=0) submission = pd.DataFrame({"Id": test_ids, "SalePrice": SalePrice}) submission.to_csv("submission.csv", index=False)<train_on_grid>
epochs = 1 batch_size = 84
Digit Recognizer
2,285,148
cat = catboost.CatBoostRegressor(iterations=best_parameters["iterations"], learning_rate=best_parameters["learning_rate"], depth=best_parameters["depth"]) cat.fit(X, y, verbose=False) evaluate(cat, X, y) submit(cat, test_features, test_ids, "submission_cat_all_dataset.csv")<set_options>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
2,285,148
warnings.filterwarnings('ignore')<load_from_csv>
history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), epochs=epochs, verbose=2, callbacks=[learning_rate_reduction], validation_data=(X_val, y_val), steps_per_epoch=X_train.shape[0] // batch_size)
Digit Recognizer
2,285,148
train = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv') test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv')<prepare_x_and_y>
result = model.predict(test) result = np.argmax(result, axis=1) result = pd.Series(result, name='Label')
Digit Recognizer
2,285,148
X_train = train.drop(['id', 'target'], axis=1) y_train = train.target X_test = test.drop(['id'], axis=1)<categorify>
my_submission = pd.concat([pd.Series(range(1, 28001), name='ImageId'), result], axis=1) my_submission.to_csv("submission.csv", index=False)
Digit Recognizer
1,437,082
cat_cols = [feature for feature in train.columns if 'cat' in feature] def label_encoder(df): for feature in cat_cols: le = LabelEncoder() le.fit(df[feature]) df[feature] = le.transform(df[feature]) return df<categorify>
trainRaw = pd.read_csv('../input/train.csv') testRaw = pd.read_csv('../input/test.csv')
Digit Recognizer
1,437,082
X_train = label_encoder(X_train) X_test = label_encoder(X_test)<choose_model_class>
train = trainRaw.copy() test_imagesKaggle = testRaw.copy() train_labelsKaggle = trainRaw['label'] print("train with Labels ", train.shape) print("train_labelsKaggle ", train_labelsKaggle.shape) print("_"*50) train.drop(['label'], axis=1, inplace=True) train_imagesKaggle = train print("train_imagesKaggle without Labels ", train_imagesKaggle.shape) print("_"*50) print("test_imagesKaggle ", test_imagesKaggle.shape)
Digit Recognizer
1,437,082
split = KFold(n_splits=5, shuffle=True, random_state=2)<train_model>
train4Display = np.array(train_imagesKaggle).reshape(42000, 28, 28) test4Display = np.array(test_imagesKaggle).reshape(28000, 28, 28) z = 4056 print("train image") print(train_labelsKaggle[z]) digit = train4Display[z] plt.imshow(digit, cmap=plt.cm.binary) plt.show() print("test image") digit = test4Display[z] plt.imshow(digit, cmap=plt.cm.binary) plt.show()
Digit Recognizer
1,437,082
def objective(trial, X, y, name='xgb'): params = {'max_depth':trial.suggest_int('max_depth', 5, 50), 'n_estimators':200000, 'subsample': trial.suggest_uniform('subsample', 0.2, 1.0), 'colsample_bytree':trial.suggest_uniform('colsample_bytree', 0.2, 1.0), 'learning_rate':trial.suggest_uniform('learning_rate', 0.007, 0.02), 'reg_lambda':trial.suggest_uniform('reg_lambda', 0.01, 50), 'reg_alpha':trial.suggest_uniform('reg_alpha', 0.01, 50), 'min_child_samples':trial.suggest_int('min_child_samples', 5, 100), 'num_leaves':trial.suggest_int('num_leaves', 10, 200), 'n_jobs' : -1, 'metric':'rmse', 'max_bin':trial.suggest_int('max_bin', 300, 1000), 'cat_smooth':trial.suggest_int('cat_smooth', 5, 100), 'cat_l2':trial.suggest_loguniform('cat_l2', 1e-3, 100)} model = LGBMRegressor(**params) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0) model.fit(X_train, y_train, eval_set=[(X_val, y_val)], eval_metric=['rmse'], early_stopping_rounds=250, categorical_feature=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], verbose=0) train_score = np.round(np.sqrt(mean_squared_error(y_train, model.predict(X_train))), 5) test_score = np.round(np.sqrt(mean_squared_error(y_val, model.predict(X_val))), 5) print(f'TRAIN RMSE : {train_score} || TEST RMSE : {test_score}') return test_score<train_model>
train_imagesKaggle = train4Display.reshape(42000, 28, 28, 1) test_imagesKaggle = test4Display.reshape(28000, 28, 28, 1) train_imagesKaggle = train_imagesKaggle.astype('float32') / 255 test_imagesKaggle = test_imagesKaggle.astype('float32') / 255 print("train_imagesKaggle ", train_imagesKaggle.shape) print("test_imagesKaggle ", test_imagesKaggle.shape) print("_"*50) train_labelsKaggle = to_categorical(train_labelsKaggle) print("train_labelsKaggle ", train_labelsKaggle.shape)
Digit Recognizer
1,437,082
optimize = partial(objective, X=X_train, y=y_train) study_lgbm = optuna.create_study(direction='minimize')<init_hyperparams>
(train_imagesRaw, train_labelsRaw), (test_imagesRaw, test_labelsRaw) = mnist.load_data()
Digit Recognizer
1,437,082
lgbm_params = {'max_depth': 16, 'subsample': 0.8032697250789377, 'colsample_bytree': 0.21067140508531404, 'learning_rate': 0.009867383057779643, 'reg_lambda': 10.987474846877767, 'reg_alpha': 17.335285595031994, 'min_child_samples': 31, 'num_leaves': 66, 'max_bin': 522, 'cat_smooth': 81, 'cat_l2': 0.029690334194270022, 'metric': 'rmse', 'n_jobs': -1, 'n_estimators': 20000}<train_model>
train_imagesKeras = train_imagesRaw.copy() train_labelsKeras = train_labelsRaw.copy() test_imagesKeras = test_imagesRaw.copy() test_labelsKeras = test_labelsRaw.copy() train_imagesKeras = train_imagesKeras.reshape(60000, 28, 28, 1) test_imagesKeras = test_imagesKeras.reshape(10000, 28, 28, 1) print("train_imagesKeras ", train_imagesKeras.shape) print("train_labelsKeras ", train_labelsKeras.shape) print("test_imagesKeras ", test_imagesKeras.shape) print("test_labelsKeras ", test_labelsKeras.shape) train_imagesKeras = train_imagesKeras.astype('float32') / 255 test_imagesKeras = test_imagesKeras.astype('float32') / 255 print("_"*50) train_labelsKeras = to_categorical(train_labelsKeras) test_labelsKeras = to_categorical(test_labelsKeras) print("train_labelsKeras ", train_labelsKeras.shape) print("test_labelsKeras ", test_labelsKeras.shape)
Digit Recognizer