kernel_id (int64, values 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completetion (string, lengths 1–182k) | comp_name (string, lengths 5–57)
---|---|---|---|
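Each row below pairs a Kaggle kernel fragment (prompt), which ends in a tag such as <compute_test_metric> naming the next operation, with the code cell that performs it (completetion). As a minimal sketch of how rows with this schema could be inspected, assuming the table has been exported to a hypothetical local file kernel_prompts.parquet (the actual dataset location is not given here):

import pandas as pd

# Hypothetical local export of the table below; the real source file is an assumption.
df = pd.read_parquet("kernel_prompts.parquet")

# Split the trailing <tag> off each prompt to see which operations occur most often.
parts = df["prompt"].str.extract(r"(?s)^(.*)<(\w+)>$")
df["prompt_code"], df["op_tag"] = parts[0], parts[1]
print(df["op_tag"].value_counts().head(10))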
4,143,339 |
folds_average_lgbm.fit(lgb_params, train_x, train_y )<compute_test_metric>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )
|
Digit Recognizer
|
4,143,339 |
np.sqrt(mean_squared_error(df_train.target, folds_average_lgbm.oof_preds))<predict_on_test>
|
adam = keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer= adam,
metrics=['accuracy'] )
|
Digit Recognizer
|
4,143,339 |
y_pred = folds_average_lgbm.predict(test_x )<save_to_csv>
|
callbacks=myCallback()
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=128),
epochs = 80, validation_data =(X_val,Y_val), callbacks=[callbacks] )
|
Digit Recognizer
|
4,143,339 |
sub = df_sample.copy()
sub["target"] = y_pred
sub.to_csv("submission_optuna_lgbm_ohe_v2.csv", index=False)
sub.head()<import_modules>
|
num_images=test_data.shape[0]
test_as_array = test_data.values[:,:]
test_shaped_array = test_as_array.reshape(num_images, img_rows, img_cols, 1)
out_test= test_shaped_array / 255
y_pred=model.predict_classes(out_test )
|
Digit Recognizer
|
4,143,339 |
import lightgbm as lgb
import optuna.integration.lightgbm as oplgb
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import category_encoders as ce
import seaborn as sns<load_from_csv>
|
submission = pd.DataFrame({"ImageId": np.arange(1, len(test_data)+1), "Label": y_pred})
submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
4,564,916 |
df_train = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/train.csv")
df_test = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/test.csv")
df_sample = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/sample_submission.csv" )<drop_column>
|
print(os.listdir(".. /input"))
print(os.getcwd())
X_data = pd.read_csv(".. /input/train.csv")
T_df = pd.read_csv(".. /input/test.csv")
Y_df = X_data["label"]
X_df = X_data.drop("label", axis=1)
X = X_df.values
X = X/255
Y = Y_df.values
np.random.seed(100)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
Y_train_r = np.ravel(Y_train)
Y_test_r = np.ravel(Y_test)
Y_train_c = np_utils.to_categorical(Y_train, 10)
Y_test_c = np_utils.to_categorical(Y_test, 10 )
|
Digit Recognizer
|
4,564,916 |
train_id = df_train["id"]
test_id = df_test["id"]
df_train.drop("id", axis=1, inplace=True)
df_test.drop("id", axis=1, inplace=True )<define_variables>
|
KNN = KNeighborsClassifier(n_neighbors=1)
KNN.fit(X_train,Y_train)
KNN.score(X_test, Y_test)
|
Digit Recognizer
|
4,564,916 |
cat_features = [f"cat{i}" for i in range(9 + 1)]<categorify>
|
KNN_2 = KNeighborsClassifier(n_neighbors=2)
KNN_2.fit(X_train,Y_train)
KNN_2.score(X_test, Y_test )
|
Digit Recognizer
|
4,564,916 |
onehot_encoder = ce.one_hot.OneHotEncoder()
onehot_encoder.fit(pd.concat([df_train[cat_features], df_test[cat_features]], axis=0))
train_ohe = onehot_encoder.transform(df_train[cat_features])
test_ohe = onehot_encoder.transform(df_test[cat_features])
train_ohe.columns = [f"OHE_{col}" for col in train_ohe]
test_ohe.columns = [f"OHE_{col}" for col in test_ohe]<define_variables>
|
svclassifier = SVC(kernel="linear")
svclassifier.fit(X_train, Y_train)
Y_pred = svclassifier.predict(X_test)
accuracy_score(Y_test, Y_pred )
|
Digit Recognizer
|
4,564,916 |
numerical_features = [f"cont{i}" for i in range(13 + 1)]<concatenate>
|
svclassifier_rbf = SVC(kernel="rbf", gamma = "auto")
svclassifier_rbf.fit(X_train, Y_train)
Y_pred = svclassifier_rbf.predict(X_test)
accuracy_score(Y_test, Y_pred )
|
Digit Recognizer
|
4,564,916 |
train_x = pd.concat([
df_train[numerical_features],
train_ohe
], axis=1 )<concatenate>
|
Lreg = list()
for i in range(-3,4):
c = 10**i
dummy = LogisticRegression(multi_class="multinomial",solver="lbfgs", max_iter = 4000, C=c)
Lreg.append(dummy.fit(X_train,Y_train_r))
Lreg_scores = list()
for i in range(0,6):
Lreg_scores.append(Lreg[i].score(X_test, Y_test))
print(Lreg_scores )
|
Digit Recognizer
|
4,564,916 |
test_x = pd.concat([
df_test[numerical_features],
test_ohe
], axis=1 )<prepare_x_and_y>
|
neural1 = Sequential()
neural1.add(Dense(16, input_dim=784 , activation='relu'))
neural1.add(Dense(10, activation='softmax'))
neural2 = Sequential()
neural2.add(Dense(32, input_dim=784 , activation='relu'))
neural2.add(Dense(10, activation='softmax'))
neural3 = Sequential()
neural3.add(Dense(64, input_dim=784 , activation='relu'))
neural3.add(Dense(10, activation='softmax'))
neural4 = Sequential()
neural4.add(Dense(128, input_dim=784 , activation='relu'))
neural4.add(Dense(10, activation='softmax'))
neural5 = Sequential()
neural5.add(Dense(256, input_dim=784 , activation='relu'))
neural5.add(Dense(10, activation='softmax'))
neural6 = Sequential()
neural6.add(Dense(512, input_dim=784 , activation='relu'))
neural6.add(Dense(10, activation='softmax'))
neural7 = Sequential()
neural7.add(Dense(1024, input_dim=784 , activation='relu'))
neural7.add(Dense(10, activation='softmax'))
neural8 = Sequential()
neural8.add(Dense(2048, input_dim=784 , activation='relu'))
neural8.add(Dense(10, activation='softmax'))
neural1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural3.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural4.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural5.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural6.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural7.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural8.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
neural1.fit(X_train, Y_train_c, epochs=50, batch_size=200)
neural2.fit(X_train, Y_train_c, epochs=50, batch_size=200)
neural3.fit(X_train, Y_train_c, epochs=50, batch_size=200)
neural4.fit(X_train, Y_train_c, epochs=50, batch_size=200)
neural5.fit(X_train, Y_train_c, epochs=50, batch_size=200)
neural6.fit(X_train, Y_train_c, epochs=50, batch_size=200)
neural7.fit(X_train, Y_train_c, epochs=50, batch_size=200)
neural8.fit(X_train, Y_train_c, epochs=50, batch_size=200)
scores1 = neural1.evaluate(X_test, Y_test_c)
scores2 = neural2.evaluate(X_test, Y_test_c)
scores3 = neural3.evaluate(X_test, Y_test_c)
scores4 = neural4.evaluate(X_test, Y_test_c)
scores5 = neural5.evaluate(X_test, Y_test_c)
scores6 = neural6.evaluate(X_test, Y_test_c)
scores7 = neural7.evaluate(X_test, Y_test_c)
scores8 = neural8.evaluate(X_test, Y_test_c)
plt.plot([1,2,3,4,5,6,7,8],
[scores1[0],scores2[0],scores3[0],scores4[0],scores5[0],scores6[0],
scores7[0],scores8[0]],'-o' )
|
Digit Recognizer
|
4,564,916 |
train_y = df_train["target"]<create_dataframe>
|
X_train_2d = X_train.reshape(X_train.shape[0], 28, 28,1)
X_train_2d.shape
X_test_2d = X_test.reshape(X_test.shape[0], 28, 28,1)
X_test_2d.shape
model_1= Sequential()
model_1.add(Conv2D(filters = 32, kernel_size =(5,5),activation ='relu', input_shape=(28,28,1)))
model_1.add(MaxPooling2D(pool_size=(2, 2)))
model_1.add(Flatten())
model_1.add(Dense(512, activation='relu'))
model_1.add(Dense(10, activation='softmax'))
model_2= Sequential()
model_2.add(Conv2D(filters = 32, kernel_size =(5,5),activation ='relu', input_shape=(28,28,1)))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Conv2D(filters = 64, kernel_size =(5,5),activation ='relu'))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Flatten())
model_2.add(Dense(512, activation='relu'))
model_2.add(Dense(10, activation='softmax'))
model_3= Sequential()
model_3.add(Conv2D(filters = 32, kernel_size =(5,5),activation ='relu', input_shape=(28,28,1)))
model_3.add(MaxPooling2D(pool_size=(2, 2)))
model_3.add(Conv2D(filters = 64, kernel_size =(5,5),activation ='relu'))
model_3.add(MaxPooling2D(pool_size=(2, 2)))
model_3.add(Conv2D(filters = 128, kernel_size =(3,3),activation ='relu'))
model_3.add(MaxPooling2D(pool_size=(2, 2)))
model_3.add(Flatten())
model_3.add(Dense(512, activation='relu'))
model_3.add(Dense(10, activation='softmax'))
model_1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_1.fit(X_train_2d, Y_train_c, epochs=30, batch_size=200)
model_2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_2.fit(X_train_2d, Y_train_c, epochs=30, batch_size=200)
model_3.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_3.fit(X_train_2d, Y_train_c, epochs=30, batch_size=200)
scores1 = model_1.evaluate(X_test_2d, Y_test_c)
scores2 = model_2.evaluate(X_test_2d, Y_test_c)
scores3 = model_3.evaluate(X_test_2d, Y_test_c)
|
Digit Recognizer
|
4,564,916 |
oplgb_train_data = oplgb.Dataset(train_x, train_y )<init_hyperparams>
|
model_2= Sequential()
model_2.add(Conv2D(filters = 32, kernel_size =(5,5),activation ='relu', input_shape=(28,28,1)))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Dropout(0.4))
model_2.add(Conv2D(filters = 64, kernel_size =(5,5),activation ='relu'))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Dropout(0.4))
model_2.add(Flatten())
model_2.add(Dense(128, activation='relu'))
model_2.add(Dropout(0.4))
model_2.add(Dense(10, activation='softmax'))
model_2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_2.fit(X_train_2d, Y_train_c, epochs=30, batch_size=200 )
|
Digit Recognizer
|
4,564,916 |
oplgb_params = {
"objective": "regression",
"metric": "root_mean_squared_error",
"verbosity": -1,
"learning_rate": 0.01
}<choose_model_class>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train_2d )
|
Digit Recognizer
|
4,564,916 |
folds = KFold(n_splits=5, shuffle=True, random_state=2021 )<choose_model_class>
|
history = model_2.fit_generator(datagen.flow(X_train_2d,Y_train_c, batch_size=200),
epochs = 25, validation_data =(X_test_2d,Y_test_c),
verbose = 2, steps_per_epoch=X_train.shape[0]/200)
|
Digit Recognizer
|
4,564,916 |
tuner_cv = oplgb.LightGBMTunerCV(oplgb_params, oplgb_train_data, num_boost_round=1000, early_stopping_rounds=100, folds=folds, verbose_eval=100, time_budget=21600)
tuner_cv.run()<find_best_params>
|
scores2 = model_2.evaluate(X_test_2d, Y_test_c)
print(scores2)
|
Digit Recognizer
|
4,564,916 |
<train_model><EOS>
|
T = T_df.values
T = T/255
T = T.reshape(T.shape[0], 28, 28,1)
results = model_2.predict(T)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission_kaggle_2D.csv",index=False )
|
Digit Recognizer
|
5,512,742 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<init_hyperparams>
|
warnings.filterwarnings("ignore")
sns.set(style='white', context='notebook', palette='deep')
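# Sensitivity, specificity, and overall accuracy for thresholded binary predictions.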
def binary_pred_stats(ytrue, ypred, threshold=0.5):
one_correct = np.sum(( ytrue==1)*(ypred > threshold))
zero_correct = np.sum(( ytrue==0)*(ypred <= threshold))
sensitivity = one_correct / np.sum(ytrue==1)
specificity = zero_correct / np.sum(ytrue==0)
accuracy =(one_correct + zero_correct)/ len(ytrue)
return sensitivity, specificity, accuracy
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float')/ cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])) :
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label' )
|
Digit Recognizer
|
5,512,742 |
lgb_params = dict(tuner_cv.best_params)
lgb_params["learning_rate"] = 0.005
lgb_params["early_stopping_round"] = 200
lgb_params["num_iterations"] = 20000<statistical_test>
|
df_train = pd.read_csv(".. /input/digit-recognizer/train.csv")
df_test = pd.read_csv(".. /input/digit-recognizer/test.csv")
|
Digit Recognizer
|
5,512,742 |
folds_average_lgbm = FoldsAverageLGBM(folds )<train_model>
|
df_train.info()
df_train.head()
Y_train = df_train['label']
X_train = df_train.drop(labels = ["label"],axis = 1)
X_test = df_test
Y_train.hist()
Y_train.value_counts()
X_train = X_train.values.reshape(-1,28,28,1)
X_test = X_test.values.reshape(-1,28,28,1)
input_shape =(28,28,1)
X_train = X_train / 255.0
X_test = X_test / 255.0
Y_train = to_categorical(Y_train, num_classes = 10)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.15, random_state = 3)
plt.figure()
g = plt.imshow(X_train[10][:,:,0])
print(Y_train[10])
|
Digit Recognizer
|
5,512,742 |
folds_average_lgbm.fit(lgb_params, train_x, train_y )<compute_test_metric>
|
model = Sequential()
model.add(Conv2D(filters = 128, kernel_size=(5, 5), activation='relu', padding='same', input_shape = input_shape))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size=(5, 5), activation='relu', padding='same', input_shape = input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.summary()
|
Digit Recognizer
|
5,512,742 |
np.sqrt(mean_squared_error(df_train.target, folds_average_lgbm.oof_preds))<predict_on_test>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 50
batch_size = 128
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.10,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train)
|
Digit Recognizer
|
5,512,742 |
y_pred = folds_average_lgbm.predict(test_x )<save_to_csv>
|
%%time
estimator = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction])
|
Digit Recognizer
|
5,512,742 |
sub = df_sample.copy()
sub["target"] = y_pred
sub.to_csv("submission_optuna_lgbm_ohe_v1.csv", index=False)
sub.head()<set_options>
|
predtrain = model.predict(X_train)
sensitivity, specificity, accuracy = binary_pred_stats(Y_train, predtrain)
print("train set:", sensitivity, specificity, accuracy)
predtest = model.predict(X_val)
sensitivity, specificity, accuracy = binary_pred_stats(Y_val, predtest)
print("test set: ", sensitivity, specificity, accuracy )
|
Digit Recognizer
|
5,512,742 |
warnings.filterwarnings("ignore" )<load_from_csv>
|
results = model.predict(X_test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_with_datagen.csv",index=False )
|
Digit Recognizer
|
6,487,634 |
train_data = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2021/train.csv')
test_data = pd.read_csv('/kaggle/input/tabular-playground-series-feb-2021/test.csv')
print(train_data.head() , "
")
print(test_data.head() )<import_modules>
|
import random
import keras
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as mpatches
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb
from math import sqrt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Input, BatchNormalization
from keras.optimizers import RMSprop, Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import LearningRateScheduler
from keras.callbacks import ModelCheckpoint
|
Digit Recognizer
|
6,487,634 |
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns<define_variables>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
|
Digit Recognizer
|
6,487,634 |
cat_features = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
numerical_features = ['cont0', 'cont1', 'cont2', 'cont3', 'cont4', 'cont5','cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']<filter>
|
y_train = train['label']
X = train.drop(['label'],axis=1)
|
Digit Recognizer
|
6,487,634 |
outlier = train_data.loc[train_data.target < 1.0]
print(outlier, "
")
print(outlier.index )<drop_column>
|
labels =[]
frequencies = []
for i in range(len(y_train)) :
lab, freq = str(y_train[i]), len([n for n in X.values[i] if n > 0])
labels.append(lab)
frequencies.append(freq)
data = {'Labels':labels, 'Frequencies':frequencies}
df = pd.DataFrame(data )
|
Digit Recognizer
|
6,487,634 |
train_data.drop([99682], inplace = True )<prepare_x_and_y>
|
df.groupby('Labels' ).mean()
|
Digit Recognizer
|
6,487,634 |
categorical_features = cat_features
y_train = train_data["target"]
train_data.drop(columns = ['target'], inplace = True)
test_data_backup = test_data.copy()
train_data.drop(columns = ["id"], inplace = True)
test_data.drop(columns = ["id"], inplace = True )<choose_model_class>
|
test_y = df['Labels']
test_x = df
test_x.drop('Labels', axis =1, inplace = True)
|
Digit Recognizer
|
6,487,634 |
model_ctb = CatBoostRegressor(iterations = 3000,
learning_rate = 0.02,
od_type = 'Iter',
loss_function = 'RMSE',
grow_policy = 'SymmetricTree',
subsample = 0.8,
verbose = 3,
random_seed = 17)
model_ctb.fit(train_data, y_train, cat_features=categorical_features)
y_pred = model_ctb.predict(test_data)
print(y_pred )<save_to_csv>
|
X_train1, X_val1, y_train1, y_val1 = train_test_split(test_x, test_y, test_size=0.1, random_state=1337 )
|
Digit Recognizer
|
6,487,634 |
solution = pd.DataFrame({"id":test_data_backup.id, "target":y_pred})
solution.to_csv("solution.csv", index = False)
print("saved successful!" )<set_options>
|
clf_rfc = RandomForestClassifier(n_estimators = 100)
clf_rfc.fit(X_train1,y_train1 )
|
Digit Recognizer
|
6,487,634 |
%matplotlib inline
<install_modules>
|
accuracy_score(clf_rfc.predict(X_val1), y_val1 )
|
Digit Recognizer
|
6,487,634 |
!pip install --upgrade xgboost
xgb.__version__<set_options>
|
clf_knn = KNeighborsClassifier()
clf_knn.fit(X_train1,y_train1)
accuracy_score(clf_knn.predict(X_val1), y_val1 )
|
Digit Recognizer
|
6,487,634 |
shap.initjs()<load_from_csv>
|
imaginary_data_pca = pca.transform(imaginary_data)
print("original shape: ", imaginary_data.shape)
print("transformed shape:", imaginary_data_pca.shape )
|
Digit Recognizer
|
6,487,634 |
train = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv')
sub = pd.read_csv('../input/tabular-playground-series-feb-2021/sample_submission.csv')
<prepare_x_and_y>
|
pca = PCA(n_components=2)
pca.fit(imaginary_data)
imaginary_data_pca = pca.transform(imaginary_data)
|
Digit Recognizer
|
6,487,634 |
target = train['target'].values<categorify>
|
imaginary_data_pca_new = pca.inverse_transform(imaginary_data_pca)
|
Digit Recognizer
|
6,487,634 |
for feature in cat_features:
le = LabelEncoder()
le.fit(train[feature])
train[feature] = le.transform(train[feature])
test[feature] = le.transform(test[feature] )<define_variables>
|
data = {'Labels':labels, 'diagonals':diags, 'widths':widths, 'heights':heights, 'Area':frequencies, 'PC1':principalComponents[:, 0],'PC2':principalComponents[:, 1]}
df = pd.DataFrame(data)
df
|
Digit Recognizer
|
6,487,634 |
train_oof = np.zeros(( 300000,))
test_preds = 0
train_oof.shape<init_hyperparams>
|
test_y = df['Labels']
test_x = df
test_x.drop('Labels', axis =1, inplace = True)
X_train1, X_val1, y_train1, y_val1 = train_test_split(test_x, test_y, test_size=0.1, random_state=1337)
clf_knn = KNeighborsClassifier()
clf_knn.fit(X_train1,y_train1)
accuracy_score(clf_knn.predict(X_val1), y_val1 )
|
Digit Recognizer
|
6,487,634 |
xgb_params= {
"objective": "reg:squarederror",
"max_depth": 6,
"learning_rate": 0.01,
"colsample_bytree": 0.4,
"subsample": 0.6,
"reg_alpha" : 6,
"min_child_weight": 100,
"n_jobs": 2,
"seed": 2001,
'tree_method': "gpu_hist",
"gpu_id": 0,
'predictor': 'gpu_predictor'
}<prepare_x_and_y>
|
mbdl = MiniBatchDictionaryLearning(n_components = 2)
mbdl.fit(X )
|
Digit Recognizer
|
6,487,634 |
test = xgb.DMatrix(test[columns] )<train_model>
|
comps = mbdl.transform(X )
|
Digit Recognizer
|
6,487,634 |
NUM_FOLDS = 10
kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0)
for f,(train_ind, val_ind)in tqdm(enumerate(kf.split(train, target))):
train_df, val_df = train.iloc[train_ind][columns], train.iloc[val_ind][columns]
train_target, val_target = target[train_ind], target[val_ind]
train_df = xgb.DMatrix(train_df, label=train_target)
val_df = xgb.DMatrix(val_df, label=val_target)
model = xgb.train(xgb_params, train_df, 3800)
temp_oof = model.predict(val_df)
temp_test = model.predict(test)
train_oof[val_ind] = temp_oof
test_preds += temp_test/NUM_FOLDS
print(mean_squared_error(temp_oof, val_target, squared=False))<compute_test_metric>
|
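# Rebuilds the feature table with two extra columns (d1, d2) and returns KNN validation accuracy on it.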
def test_model(d1,d1_lab, d2, d2_lab,data):
data = {'Labels':labels, 'diagonals':diags, 'widths':widths, 'heights':heights, 'Area':frequencies, 'PC1':principalComponents[:, 0],'PC2':principalComponents[:, 1]}
data[d1_lab] = d1
data[d2_lab] = d2
df = pd.DataFrame(data)
print(df.columns)
test_y = df['Labels']
test_x = df
test_x.drop('Labels', axis =1, inplace = True)
X_train1, X_val1, y_train1, y_val1 = train_test_split(test_x, test_y, test_size=0.1, random_state=1337)
clf_knn = KNeighborsClassifier()
clf_knn.fit(X_train1,y_train1)
return accuracy_score(clf_knn.predict(X_val1), y_val1)
test_model(comps[:, 0],'d1', comps[:, 1],'d2', data )
|
Digit Recognizer
|
6,487,634 |
mean_squared_error(train_oof, target, squared=False)
<save_model>
|
lda = LinearDiscriminantAnalysis(n_components = 2,)
comps = lda.fit_transform(X,y_train.values )
|
Digit Recognizer
|
6,487,634 |
np.save('train_oof', train_oof)
np.save('test_preds', test_preds )<predict_on_test>
|
test_model(comps[:, 0],'lda1', comps[:, 1],'lda2', data )
|
Digit Recognizer
|
6,487,634 |
%%time
shap_preds = model.predict(test, pred_contribs=True )<load_from_csv>
|
X = X / 255.0
test = test / 255.0
|
Digit Recognizer
|
6,487,634 |
train = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv')
for feature in cat_features:
le = LabelEncoder()
le.fit(train[feature])
train[feature] = le.transform(train[feature])
test[feature] = le.transform(test[feature] )<predict_on_test>
|
X = X.values.reshape(X.shape[0], 28, 28,1)
test = test.values.reshape(test.shape[0], 28, 28,1)
|
Digit Recognizer
|
6,487,634 |
%%time
shap_interactions = model.predict(xgb.DMatrix(test[columns]), pred_interactions=True )<feature_engineering>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
valid_datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False )
|
Digit Recognizer
|
6,487,634 |
train['cont8_cont0'] = train['cont8']*train['cont0']
train['cont9_cont0'] = train['cont9']*train['cont0']
train['cont9_cont5'] = train['cont9']*train['cont5']
train['cont8_cont5'] = train['cont8']*train['cont5']
test['cont8_cont0'] = test['cont8']*test['cont0']
test['cont9_cont0'] = test['cont9']*test['cont0']
test['cont9_cont5'] = test['cont9']*test['cont5']
test['cont8_cont5'] = test['cont8']*test['cont5']
<set_options>
|
y_train = to_categorical(y_train,num_classes=10)
|
Digit Recognizer
|
6,487,634 |
del shap_interactions, shap_preds
gc.collect()
gc.collect()<prepare_x_and_y>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='accuracy',
patience=3,
verbose=10,
factor=0.5,
min_lr=0.00001)
|
Digit Recognizer
|
6,487,634 |
test = xgb.DMatrix(test[columns] )<init_hyperparams>
|
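# Functional-API CNN: stacked 32- and 64-filter conv blocks with batch norm and dropout, then a 264-unit dense head.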
def build_model(input_shape=(28, 28, 1), classes = 10):
activation = 'relu'
padding = 'same'
gamma_initializer = 'uniform'
input_layer = Input(shape=input_shape)
hidden=Conv2D(32,(3,3), padding=padding,activation = activation, name="conv1" )(input_layer)
hidden=BatchNormalization(name="batch1" )(hidden)
hidden=Conv2D(32,(3,3), padding=padding,activation = activation, name="conv2" )(hidden)
hidden=BatchNormalization(name="batch2" )(hidden)
hidden=Conv2D(32,(5,5), padding=padding,activation = activation, name="conv3" )(hidden)
hidden=BatchNormalization(name="batch3" )(hidden)
hidden=MaxPool2D(pool_size=2, padding=padding, name="max1" )(hidden)
hidden=Dropout(0.4 )(hidden)
hidden=Conv2D(64,(3,3), padding =padding, activation = activation, name="conv4" )(hidden)
hidden=BatchNormalization(name = 'batch4' )(hidden)
hidden=Conv2D(64,(3,3), padding =padding, activation = activation, name="conv45" )(hidden)
hidden=BatchNormalization(name = 'batch5' )(hidden)
hidden=Conv2D(64,(5,5), padding =padding, activation = activation, name="conv6" )(hidden)
hidden=BatchNormalization(name = 'batch6' )(hidden)
hidden=MaxPool2D(pool_size=2, padding="same", name="max2" )(hidden)
hidden=Dropout(0.4 )(hidden)
hidden=Flatten()(hidden)
hidden=Dense(264,activation = activation, name="Dense1" )(hidden)
hidden=Dropout(0.3 )(hidden)
output = Dense(classes, activation = "softmax" )(hidden)
model = Model(inputs=input_layer, outputs=output)
return model
|
Digit Recognizer
|
6,487,634 |
xgb_params= {
"objective": "reg:squarederror",
"max_depth": 6,
"learning_rate": 0.01,
"colsample_bytree": 0.4,
"subsample": 0.6,
"reg_alpha" : 6,
"min_child_weight": 100,
"n_jobs": 2,
"seed": 2001,
'tree_method': "gpu_hist",
"gpu_id": 0,
'predictor': 'gpu_predictor'
}<train_model>
|
epochs = 50
initial_learningrate=2e-3
batch_size = 264
|
Digit Recognizer
|
6,487,634 |
NUM_FOLDS = 10
kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0)
for f,(train_ind, val_ind)in tqdm(enumerate(kf.split(train, target))):
train_df, val_df = train.iloc[train_ind][columns], train.iloc[val_ind][columns]
train_target, val_target = target[train_ind], target[val_ind]
train_df = xgb.DMatrix(train_df, label=train_target)
val_df = xgb.DMatrix(val_df, label=val_target)
model = xgb.train(xgb_params, train_df, 3800)
temp_oof = model.predict(val_df)
temp_test = model.predict(test)
train_oof_2[val_ind] = temp_oof
test_preds_2 += temp_test/NUM_FOLDS
print(mean_squared_error(temp_oof, val_target, squared=False))<compute_test_metric>
|
optimizer = Adam(learning_rate=initial_learningrate)
model = build_model(input_shape=(28, 28, 1), classes = 10)
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
|
Digit Recognizer
|
6,487,634 |
mean_squared_error(train_oof_2, target, squared=False)
<compute_test_metric>
|
X_train, X_val, y_train, y_val = train_test_split(X, y_train, test_size=0.1, random_state=1337)
datagen.fit(X_train)
valid_datagen.fit(X_val)
|
Digit Recognizer
|
6,487,634 |
mean_squared_error(0.5*train_oof+0.5*train_oof_2, target, squared=False)
<save_model>
|
callbacks = [learning_rate_reduction]
history = model.fit_generator(datagen.flow(X_train,y_train),
epochs = epochs,
validation_data=valid_datagen.flow(X_val,y_val),
verbose = 1,
callbacks = callbacks)
|
Digit Recognizer
|
6,487,634 |
np.save('train_oof_2', train_oof_2)
np.save('test_preds_2', test_preds_2 )<predict_on_test>
|
y_pre_test=model.predict(X_val)
y_pre_test=np.argmax(y_pre_test,axis=1)
y_test=np.argmax(y_val,axis=1)
conf=confusion_matrix(y_test,y_pre_test)
conf=pd.DataFrame(conf,index=range(0,10),columns=range(0,10))
|
Digit Recognizer
|
6,487,634 |
%%time
shap_preds = model.predict(test, pred_contribs=True )<load_from_csv>
|
print('out of {} samples, we got {} incorrect'.format(len(X_train), round(len(X_train)- history.history['accuracy'][-1] * len(X_train))))
|
Digit Recognizer
|
6,487,634 |
train = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv')
test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv')
for feature in cat_features:
le = LabelEncoder()
le.fit(train[feature])
train[feature] = le.transform(train[feature])
test[feature] = le.transform(test[feature])
train['cont8_cont0'] = train['cont8']*train['cont0']
train['cont9_cont0'] = train['cont9']*train['cont0']
train['cont9_cont5'] = train['cont9']*train['cont5']
train['cont8_cont5'] = train['cont8']*train['cont5']
test['cont8_cont0'] = test['cont8']*test['cont0']
test['cont9_cont0'] = test['cont9']*test['cont0']
test['cont9_cont5'] = test['cont9']*test['cont5']
test['cont8_cont5'] = test['cont8']*test['cont5']
columns = test.columns[1:]<save_to_csv>
|
predictions = model.predict(test )
|
Digit Recognizer
|
6,487,634 |
sub['target'] = test_preds
sub.to_csv('submission.csv', index=False )<save_to_csv>
|
predictions = predictions.argmax(axis = -1)
predictions
|
Digit Recognizer
|
6,487,634 |
sub['target'] = test_preds_2
sub.to_csv('submission_2.csv', index=False )<save_to_csv>
|
submission['Label'] = predictions
|
Digit Recognizer
|
6,487,634 |
sub['target'] = 1.1*test_preds-0.1*test_preds_2
sub.to_csv('submission_average_0.csv', index=False )<save_to_csv>
|
submission.to_csv('submission.csv',index=False )
|
Digit Recognizer
|
6,762,101 |
sub['target'] = 1.2*test_preds-0.2*test_preds_2
sub.to_csv('submission_average_1.csv', index=False )<save_to_csv>
|
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv" )
|
Digit Recognizer
|
6,762,101 |
sub['target'] = 1.3*test_preds-0.3*test_preds_2
sub.to_csv('submission_average_2.csv', index=False )<load_from_csv>
|
X = train.drop(['label'], axis = 1 ).values/255
Y = train['label'].values
X_valid = test.values/255
X = X.reshape(X.shape[0],28,28,1)
X_valid = X_valid.reshape(X_valid.shape[0],28,28,1)
|
Digit Recognizer
|
6,762,101 |
input_dir = os.path.join('..', 'input', 'tabular-playground-series-feb-2021')
sample_submission_csv_path = os.path.join(input_dir, 'sample_submission.csv')
test_csv_path = os.path.join(input_dir, 'test.csv')
train_csv_path = os.path.join(input_dir, 'train.csv')
train_df = pd.read_csv(train_csv_path)
y = train_df[['target']]
x = train_df.drop(columns=['id', 'target'])
cat_features = list(range(0, 10))
train_pool = Pool(x, y, cat_features=cat_features)
model = CatBoostRegressor(random_seed=1)
model.fit(train_pool)
test_df = pd.read_csv(test_csv_path)
x_id = test_df[['id']]
x_test = test_df.drop(columns=['id'])
test_pool = Pool(x_test, cat_features=cat_features)
target = model.predict(test_pool)
submission = pd.read_csv(sample_submission_csv_path)
submission[['target']] = target
submission.to_csv('submission.csv', index=False)
<import_modules>
|
X_train, X_dev, Y_train,Y_dev = train_test_split(X,Y,test_size = 0.2 )
|
Digit Recognizer
|
6,762,101 |
import lightgbm as lgb
import optuna.integration.lightgbm as oplgb
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import category_encoders as ce
import seaborn as sns<load_from_csv>
|
f = 2
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(f*16,kernel_size =(3,3), padding = 'same',activation='relu',
kernel_initializer='he_uniform',
input_shape =(28,28,1)) ,
tf.keras.layers.Conv2D(f*16,(3,3), activation = "relu", padding = 'same'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.Conv2D(f*32, kernel_size =(3,3), padding = 'same',activation='relu'),
tf.keras.layers.Conv2D(f*32,(3,3), activation = "relu", padding = 'same'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(f*64, kernel_size =(3,3), padding = 'same',activation='relu'),
tf.keras.layers.Conv2D(f*64,(3,3), activation = "relu", padding = 'same'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.Dropout(0.125),
keras.layers.Flatten() ,
keras.layers.Dense(512, activation = 'relu'),
keras.layers.Dense(128, activation = 'relu'),
keras.layers.Dense(10, activation='softmax')
])
model.summary()
|
Digit Recognizer
|
6,762,101 |
df_train = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/train.csv")
df_test = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/test.csv")
df_sample = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2021/sample_submission.csv" )<drop_column>
|
from keras.utils import plot_model
|
Digit Recognizer
|
6,762,101 |
train_id = df_train["id"]
test_id = df_test["id"]
df_train.drop("id", axis=1, inplace=True)
df_test.drop("id", axis=1, inplace=True )<define_variables>
|
model.compile(optimizer=Adam(learning_rate=0.0003),
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'] )
|
Digit Recognizer
|
6,762,101 |
cat_features = [f"cat{i}" for i in range(9 + 1)]<categorify>
|
train_datagen = ImageDataGenerator(rotation_range=10,
width_shift_range=0.15,
height_shift_range=0.15,
shear_range=0.05,
zoom_range=0.15,
horizontal_flip=False)
valid_datagen = ImageDataGenerator(horizontal_flip=False,
)
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
def lr_decay(epoch, initial_learningrate = 0.0003):
return initial_learningrate * 0.99 ** epoch
learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
patience=300,
verbose=1,
factor=0.5,
min_lr=0.00001)
batchsize = 512*2
epoch = 45
history = model.fit_generator(train_datagen.flow(X, Y, batch_size = batchsize),
steps_per_epoch = 100,
epochs = epoch,
callbacks=[
LearningRateScheduler(lr_decay),
callback],
validation_data=valid_datagen.flow(X_dev, Y_dev),
validation_steps=50,
)
|
Digit Recognizer
|
6,762,101 |
<categorify><EOS>
|
yhat = model.predict_classes(X_valid)
submission['Label']=pd.Series(yhat)
submission.to_csv('submission.csv',index=False )
|
Digit Recognizer
|
4,636,846 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_variables>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, roc_curve, auc, roc_auc_score
from sklearn.preprocessing import label_binarize
from sklearn.svm import SVC
from itertools import cycle
from scipy import interp
from keras.optimizers import Adam
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras import backend as K
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Input, Dropout, Flatten
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
|
Digit Recognizer
|
4,636,846 |
numerical_features = [f"cont{i}" for i in range(13 + 1)]<concatenate>
|
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv" )
|
Digit Recognizer
|
4,636,846 |
train_x = pd.concat([
df_train[numerical_features],
train_ohe,
train_or
], axis=1 )<concatenate>
|
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv" )
|
Digit Recognizer
|
4,636,846 |
test_x = pd.concat([
df_test[numerical_features],
test_ohe,
test_or
], axis=1 )<prepare_x_and_y>
|
X_train, X_valid, y_train, y_valid = train_test_split(train.drop(['label'], axis=1), train['label'], random_state = 0 )
|
Digit Recognizer
|
4,636,846 |
train_y = df_train["target"]<create_dataframe>
|
X_train = X_train.values.reshape(-1,28,28,1)
X_valid = X_valid.values.reshape(-1,28,28,1)
y_train = label_binarize(y_train, classes=range(10))
y_valid = label_binarize(y_valid, classes=range(10))
|
Digit Recognizer
|
4,636,846 |
oplgb_train_data = oplgb.Dataset(train_x, train_y )<init_hyperparams>
|
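# Keras callback that computes and prints the validation ROC-AUC every `interval` epochs.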
class RocAucEvaluation(Callback):
def __init__(self, validation_data=() , interval=1):
super(Callback, self ).__init__()
self.interval = interval
self.X_val, self.y_val = validation_data
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
y_pred = self.model.predict(self.X_val, verbose=0)
score = roc_auc_score(self.y_val, y_pred)
print("
ROC-AUC - epoch: {:d} - score: {:.6f}".format(epoch+1, score))
|
Digit Recognizer
|
4,636,846 |
oplgb_params = {
"objective": "regression",
"metric": "root_mean_squared_error",
"verbosity": -1,
"learning_rate": 0.01
}<choose_model_class>
|
class CNN:
def __init__(self):
self.arguments = {
'batch_size': 64,
'epochs': 100,
'learning_rate': 1e-3,
'learning_rate_decay': 0,
'units': 128,
'drop_out_rate': 0.2,
'checkpoint_path': 'best_bilstm_model.hdf5',
'early_stop_patience': 10,
}
print('Building CNN Models...')
print(self.arguments)
def fit(self, X_train, y_train, X_valid, y_valid):
file_path = self.arguments['checkpoint_path']
check_point = ModelCheckpoint(file_path, monitor = "val_loss", verbose = 1,
save_best_only = True, mode = "min")
ra_val = RocAucEvaluation(validation_data=(X_valid, y_valid), interval = 1)
early_stop = EarlyStopping(monitor = "val_loss", mode = "min", patience = self.arguments['early_stop_patience'])
inp = Input(shape=(28,28,1))
x = Conv2D(filters=20, kernel_size =(5, 5), activation="relu" )(inp)
max_pool_x = MaxPooling2D(pool_size=(2,2))(x)
y = Conv2D(filters=20, kernel_size =(5, 5), activation="relu" )(max_pool_x)
max_pool_y = MaxPooling2D(pool_size=(2,2))(y)
flat = Flatten()(max_pool_y)
z = Dense(100, activation="relu" )(flat)
z = Dropout(rate = self.arguments['drop_out_rate'] )(z)
z = Dense(100, activation="relu" )(z)
z = Dropout(rate = self.arguments['drop_out_rate'] )(z)
output = Dense(10, activation="softmax" )(z)
self.model = Model(inputs = inp, outputs = output)
self.model.compile(loss = "categorical_crossentropy", optimizer = Adam(lr = self.arguments['learning_rate'],
decay = self.arguments['learning_rate_decay']), metrics = ["accuracy"])
history = self.model.fit(X_train, y_train, batch_size = self.arguments['batch_size'], epochs = self.arguments['epochs'],
validation_data =(X_valid, y_valid), verbose = 1, callbacks = [ra_val, check_point, early_stop])
self.model = load_model(file_path)
print('Finished Building CNN Model as class attribute class.model')
return self
def predict(self, X, batch_size, verbose):
return self.model.predict(X, batch_size = batch_size, verbose = verbose )
|
Digit Recognizer
|
4,636,846 |
folds = KFold(n_splits=5, shuffle=True, random_state=2021 )<choose_model_class>
|
cnn = CNN()
cnn.fit(X_train, y_train, X_valid, y_valid)
cnn_pred = cnn.predict(X_valid, batch_size = cnn.arguments['batch_size'], verbose = 1 )
|
Digit Recognizer
|
4,636,846 |
tuner_cv = oplgb.LightGBMTunerCV(oplgb_params, oplgb_train_data, num_boost_round=1000, early_stopping_rounds=100, folds=folds, verbose_eval=100, time_budget=21600)
tuner_cv.run()<find_best_params>
|
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
)
|
Digit Recognizer
|
4,636,846 |
tuner_cv.best_params<train_model>
|
train_gen = datagen.flow(X_train, y_train, batch_size=64)
valid_gen = datagen.flow(X_valid, y_valid, batch_size=64 )
|
Digit Recognizer
|
4,636,846 |
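# Trains one LightGBM model per fold, keeps out-of-fold predictions, and averages the fold models at prediction time.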
class FoldsAverageLGBM:
def __init__(self, folds):
self.folds = folds
self.models = []
def fit(self, lgb_params, train_x, train_y):
oof_preds = np.zeros_like(train_y)
self.train_x = train_x
self.train_y = train_y.values
for tr_idx, va_idx in tqdm(folds.split(train_x)) :
tr_x, va_x = self.train_x.iloc[tr_idx], self.train_x.iloc[va_idx]
tr_y, va_y = self.train_y[tr_idx], self.train_y[va_idx]
lgb_train_dataset = lgb.Dataset(tr_x, tr_y)
lgb_valid_dataset = lgb.Dataset(va_x, va_y)
model = lgb.train(lgb_params, lgb_train_dataset, valid_sets=[lgb_valid_dataset], verbose_eval=100)
self.models.append(model)
oof_pred = model.predict(va_x)
oof_preds[va_idx] = oof_pred
self.oof_preds = oof_preds
def predict(self, test_x):
preds = []
for model in tqdm(self.models):
pred = model.predict(test_x)
preds.append(pred)
preds = np.mean(preds, axis=0)
return preds
def get_feature_importance(self, importance_type="gain"):
feature_names = self.models[0].feature_name()
feature_importances_list = [model.feature_importance(importance_type)for model in self.models]
out_df = pd.DataFrame()
for i, name in enumerate(feature_names):
out_df[name] = [v[i] for v in feature_importances_list]
return out_df<init_hyperparams>
|
class CNN_Generator:
def __init__(self):
self.arguments = {
'batch_size': 64,
'epochs': 100,
'learning_rate': 1e-3,
'learning_rate_decay': 0,
'units': 128,
'drop_out_rate': 0.2,
'checkpoint_path': 'best_bilstm_model.hdf5',
'early_stop_patience': 10,
}
print('Building CNN_Generator Models...')
print(self.arguments)
def fit(self, train_gen, valid_gen):
file_path = self.arguments['checkpoint_path']
check_point = ModelCheckpoint(file_path, monitor = "val_loss", verbose = 1,
save_best_only = True, mode = "min")
ra_val = RocAucEvaluation(validation_data=(X_valid, y_valid), interval = 1)
early_stop = EarlyStopping(monitor = "val_loss", mode = "min", patience = self.arguments['early_stop_patience'])
inp = Input(shape=(28,28,1))
x = Conv2D(filters=20, kernel_size =(5, 5), activation="relu" )(inp)
max_pool_x = MaxPooling2D(pool_size=(2,2))(x)
y = Conv2D(filters=20, kernel_size =(5, 5), activation="relu" )(max_pool_x)
max_pool_y = MaxPooling2D(pool_size=(2,2))(y)
flat = Flatten()(max_pool_y)
z = Dense(100, activation="relu" )(flat)
z = Dropout(rate = self.arguments['drop_out_rate'] )(z)
z = Dense(100, activation="relu" )(z)
z = Dropout(rate = self.arguments['drop_out_rate'] )(z)
output = Dense(10, activation="softmax" )(z)
self.model = Model(inputs = inp, outputs = output)
self.model.compile(loss = "categorical_crossentropy", optimizer = Adam(lr = self.arguments['learning_rate'],
decay = self.arguments['learning_rate_decay']), metrics = ["accuracy"])
history = self.model.fit_generator(train_gen, epochs = self.arguments['epochs'],
steps_per_epoch = X_train.shape[0] // self.arguments['batch_size'],
validation_steps = X_train.shape[0] // self.arguments['batch_size'],
validation_data = valid_gen, verbose = 1, callbacks = [ra_val, check_point, early_stop])
self.model = load_model(file_path)
print('Finished Building CNN Model as class attribute class.model')
return self
def predict(self, X, batch_size, verbose):
return self.model.predict(X, batch_size = batch_size, verbose = verbose )
|
Digit Recognizer
|
4,636,846 |
lgb_params = dict(tuner_cv.best_params)
lgb_params["learning_rate"] = 0.001
lgb_params["early_stopping_round"] = 1000
lgb_params["num_iterations"] = 20000<statistical_test>
|
cnn_gen = CNN_Generator()
cnn_gen.fit(train_gen, valid_gen)
cnn_gen_pred = cnn_gen.predict(X_valid, batch_size = cnn.arguments['batch_size'], verbose = 1 )
|
Digit Recognizer
|
4,636,846 |
folds_average_lgbm = FoldsAverageLGBM(folds )<train_model>
|
train_predictions = []
valid_predictions = []
test_predictions = []
for i in range(5):
cnn = CNN_Generator()
cnn.fit(train_gen, valid_gen)
train_predictions += [cnn.predict(X_train, batch_size = cnn.arguments['batch_size'], verbose = 0)]
valid_predictions += [cnn.predict(X_valid, batch_size = cnn.arguments['batch_size'], verbose = 1)]
test_predictions += [cnn.predict(test.values.reshape(-1,28,28,1), batch_size = cnn.arguments['batch_size'], verbose = 1)]
|
Digit Recognizer
|
4,636,846 |
folds_average_lgbm.fit(lgb_params, train_x, train_y )<compute_test_metric>
|
train_pred = np.concatenate(train_predictions, axis=1)
valid_pred = np.concatenate(valid_predictions, axis=1)
test_pred = np.concatenate(test_predictions, axis=1)
svm = SVC(probability=True, gamma='scale' ).fit(train_pred, np.argmax(y_train, axis=1))
pred = svm.predict(valid_pred)
print("val_acc: ", round(np.sum(pred == np.argmax(y_valid, axis=1)) /y_valid.shape[0], 4))
print("val_ROC-AUC: ", round(roc_auc_score(y_valid, svm.predict_proba(valid_pred)) , 6))
|
Digit Recognizer
|
4,636,846 |
<predict_on_test><EOS>
|
sample_submission = pd.read_csv(".. /input/sample_submission.csv")
sample_submission['Label'] = svm.predict(test_pred)
sample_submission.head()
sample_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
2,617,477 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<save_to_csv>
|
from keras.layers import (Conv2D, Dense, Dropout, Flatten, BatchNormalization,
                          MaxPool2D, ReLU)
%matplotlib inline
|
Digit Recognizer
|
2,617,477 |
sub = df_sample.copy()
sub["target"] = y_pred
sub.to_csv("submission_optuna_lgbm_ohe_or_v1.csv", index=False)
sub.head()<import_modules>
|
print("Loading...")
data_train = pd.read_csv(".. /input/train.csv")
data_test = pd.read_csv(".. /input/test.csv")
print("Done!" )
|
Digit Recognizer
|
2,617,477 |
import os
import numpy as np
import pandas as pd<load_from_csv>
|
print("Training data: {} rows, {} columns.".format(data_train.shape[0], data_train.shape[1]))
print("Test data: {} rows, {} columns.".format(data_test.shape[0], data_test.shape[1]))
|
Digit Recognizer
|
2,617,477 |
pred1 = pd.read_csv(".. /input/tps-feb-submission-ensemble/submission_pseudo_lgb.csv")
pred2 = pd.read_csv(".. /input/tps-feb-submission-ensemble/submission_pseudo_lgb_4.csv")
pred3 = pd.read_csv(".. /input/tps-feb-submission-ensemble/submission_pseudo_lgb_5.csv")
pred4 = pd.read_csv(".. /input/tps-feb-submission-ensemble/pseudo_lgb_1.csv" )<compute_test_metric>
|
x_train = data_train.values[:, 1:]
y_train = data_train.values[:, 0]
|
Digit Recognizer
|
2,617,477 |
pred =(pred1.target + pred2.target + pred3.target + pred4.target)/ 4
pred<load_from_csv>
|
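# Reshapes flat pixel vectors into (m, height, width, 1) image arrays, inferring square dimensions.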
def convert_2d(x):
if len(x.shape)== 1:
m = 1
height = width = int(np.sqrt(x.shape[0]))
else:
m = x.shape[0]
height = width = int(np.sqrt(x.shape[1]))
x_2d = np.reshape(x,(m, height, width, 1))
return x_2d
|
Digit Recognizer
|
2,617,477 |
submission = pd.read_csv(".. /input/tabular-playground-series-feb-2021/sample_submission.csv")
submission.target = pred
submission<save_to_csv>
|
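# Shifts each image by a `dist` fraction in all four directions; returns the shifted copies flattened, with labels prepended.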
def translate(x, y, dist):
images = convert_2d(x)
m, height, width, channel = images.shape
anchors = []
anchors.append(( 0, height, int(dist * width), width, 0, height, 0, width - int(dist * width)))
anchors.append(( 0, height, 0, width - int(dist * width), 0, height, int(dist * width), width))
anchors.append(( int(dist * height), height, 0, width, 0, height - int(dist * height), 0, width))
anchors.append(( 0, height - int(dist * height), 0, width, int(dist * height), height, 0, width))
new_images = np.zeros(( 4, m, height, width, channel))
for i in range(4):
top, bottom, left, right, new_top, new_bottom, new_left, new_right = anchors[i]
new_images[i, :, new_top:new_bottom, new_left:new_right, :] = images[:, top:bottom, left:right, :]
new_images = np.reshape(new_images,(4 * m, -1))
y = np.tile(y,(4, 1)).reshape(( -1, 1))
new_images = np.concatenate(( y, new_images), axis=1 ).astype(int)
return new_images
|
Digit Recognizer
|
2,617,477 |
submission.to_csv("ensemble.csv", index=False )<install_modules>
|
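# Replaces a random `noise_lvl` fraction of each image's pixels with random 0-255 values; returns labels plus noisy pixels.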
def add_noise(x, y, noise_lvl):
m, n = x.shape
noise_num = int(noise_lvl * n)
for i in range(m):
noise_idx = np.random.randint(0, n, n ).argsort() [:noise_num]
x[i, noise_idx] = np.random.randint(0, 255, noise_num)
noisy_data = np.concatenate(( y.reshape(( -1, 1)) , x), axis=1 ).astype("int")
return noisy_data
|
Digit Recognizer
|
2,617,477 |
!pip install -q transformers ekphrasis keras-tuner<import_modules>
|
start = time.clock()
print("Augment the data...")
cropped_imgs = crop_image(x_train, y_train, 0.9)
translated_imgs = translate(x_train, y_train, 0.1)
noisy_imgs = add_noise(x_train, y_train, 0.1)
rotated_imgs = rotate_image(x_train, y_train, 10)
data_train = np.vstack(( data_train, cropped_imgs, translated_imgs, noisy_imgs, rotated_imgs))
np.random.shuffle(data_train)
print("Done!")
time_used = int(time.clock() - start)
print("Time used: {}s.".format(time_used))
|
Digit Recognizer
|
2,617,477 |
from tensorflow.keras.layers import (
    Input,
    Dense,
    Embedding,
    Flatten,
    Dropout,
    GlobalMaxPooling1D,
    GRU,
    concatenate,
)
from transformers import (
    DistilBertTokenizerFast,
    TFDistilBertModel,
    DistilBertConfig,
)
<compute_train_metric>
|
x_train = data_train[:, 1:]
y_train = data_train[:, 0]
x_test = data_test.values
print("Augmented training data: {} rows, {} columns.".format(data_train.shape[0], data_train.shape[1]))
|
Digit Recognizer
|
2,617,477 |
def print_metrics(model, x_train, y_train, x_val, y_val):
train_acc = dict(model.evaluate(x_train, y_train, verbose=0, return_dict=True)) [
"accuracy"
]
val_acc = dict(model.evaluate(x_val, y_val, verbose=0, return_dict=True)) [
"accuracy"
]
val_preds = model.predict(x_val)
val_preds_bool = val_preds >= 0.5
print("")
print(f"Training Accuracy: {train_acc:.2%}")
print(f"Validation Accuracy: {val_acc:.2%}")
print("")
print(f"Validation f1 score: {sklearn.metrics.f1_score(val_preds_bool, y_val):.2%}" )<load_pretrained>
|
x_train = convert_2d(x_train)
x_test = convert_2d(x_test )
|
Digit Recognizer
|
2,617,477 |
model_class, tokenizer_class, pretrained_weights =(TFDistilBertModel, DistilBertTokenizerFast, 'distilbert-base-uncased')
pretrained_bert_tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
def get_pretrained_bert_model(config=pretrained_weights):
if not config:
config = DistilBertConfig(num_labels=2)
return model_class.from_pretrained(pretrained_weights, config=config)
<load_from_csv>
|
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes )
|
Digit Recognizer
|
2,617,477 |
train_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv" )<count_values>
|
x_train = x_train / 255
x_test = x_test / 255
|
Digit Recognizer
|
2,617,477 |
print("label counts:")
train_df.target.value_counts()<count_missing_values>
|
seed = np.random.randint(1, 100)
x_train, x_dev, y_train, y_dev = train_test_split(x_train, y_train, test_size=0.1, random_state=seed )
|
Digit Recognizer
|
2,617,477 |
print("train precentage of nulls:")
print(round(train_df.isnull().sum() / train_df.count() * 100, 2))<count_missing_values>
|
del data_train
del data_test
gc.collect()
|
Digit Recognizer
|
2,617,477 |
print("test precentage of nulls:")
print(round(test_df.isnull().sum() / test_df.count() * 100, 2))<categorify>
|
filters =(32, 32, 64, 64)
kernel =(5, 5)
drop_prob = 0.2
model = keras.models.Sequential()
model.add(Conv2D(filters[0], kernel, padding="same", input_shape=(28, 28, 1),
kernel_initializer=keras.initializers.he_normal()))
model.add(BatchNormalization())
model.add(ReLU())
model.add(Conv2D(filters[0], kernel, padding="same",
kernel_initializer=keras.initializers.he_normal()))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPool2D())
model.add(Dropout(drop_prob))
model.add(Conv2D(filters[1], kernel, padding="same",
kernel_initializer=keras.initializers.he_normal()))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPool2D())
model.add(Dropout(drop_prob))
model.add(Conv2D(filters[2], kernel, padding="same",
kernel_initializer=keras.initializers.he_normal()))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPool2D())
model.add(Dropout(drop_prob))
model.add(Conv2D(filters[3], kernel, padding="same",
kernel_initializer=keras.initializers.he_normal()))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPool2D())
model.add(Dropout(drop_prob))
model.add(Flatten())
model.add(Dropout(drop_prob))
model.add(Dense(128, activation="relu"))
model.add(Dropout(drop_prob))
model.add(Dense(num_classes, activation="softmax"))
model.compile(keras.optimizers.Adam() , "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|