Column schema:
kernel_id — int64, values 24.2k to 23.3M
prompt — string, lengths 8 to 1.85M
completion — string, lengths 1 to 182k
comp_name — string, lengths 5 to 57
1,437,082
preds_list_base = [] preds_list_final_iteration = [] preds_list_all = [] for train_idx, val_idx in split.split(X_train): X_tr = X_train.iloc[train_idx] X_val = X_train.iloc[val_idx] y_tr = y_train.iloc[train_idx] y_val = y_train.iloc[val_idx] Model = LGBMRegressor(**lgbm_params ).fit(X_tr, y_tr, eval_set=[(X_val, y_val)], eval_metric=['rmse'], early_stopping_rounds=250, categorical_feature=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], verbose=0) preds_list_base.append(Model.predict(X_test)) preds_list_all.append(Model.predict(X_test)) print(f'RMSE for Base model is {np.sqrt(mean_squared_error(y_val, Model.predict(X_val)))}') first_rmse = np.sqrt(mean_squared_error(y_val, Model.predict(X_val))) params = lgbm_params.copy() for i in range(1, 8): if i >2: params['reg_lambda'] *= 0.9 params['reg_alpha'] *= 0.9 params['num_leaves'] += 40 params['learning_rate'] = 0.003 Model = LGBMRegressor(**params ).fit(X_tr, y_tr, eval_set=[(X_val, y_val)], eval_metric=['rmse'], early_stopping_rounds=200, categorical_feature=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], verbose=0, init_model=Model) preds_list_all.append(Model.predict(X_test)) print(f'RMSE for Incremental trial {i} model is {np.sqrt(mean_squared_error(y_val, Model.predict(X_val)))}') last_rmse = np.sqrt(mean_squared_error(y_val, Model.predict(X_val))) print('',end=' ') print(f'Improvement of : {first_rmse - last_rmse}') print('-' * 100) preds_list_final_iteration.append(Model.predict(X_test))<prepare_x_and_y>
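(For context: the cell above resumes boosting from a fitted model by passing it back through init_model. A minimal stand-alone sketch of that pattern, with toy data and parameter values that are illustrative only, not the notebook's actual settings:)

import numpy as np
from lightgbm import LGBMRegressor
from sklearn.datasets import make_regression

# Toy data standing in for X_tr / y_tr above.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)

# First round of boosting.
base = LGBMRegressor(n_estimators=100, learning_rate=0.05).fit(X, y)

# Continue boosting from the fitted model with a smaller learning rate;
# LightGBM's sklearn wrapper accepts a fitted model (or its Booster) as init_model.
refined = LGBMRegressor(n_estimators=50, learning_rate=0.005).fit(X, y, init_model=base)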
train_images = np.concatenate(( train_imagesKeras,train_imagesKaggle), axis=0) print("new Concatenated train_images ", train_images.shape) print("_"*50) train_labels = np.concatenate(( train_labelsKeras,train_labelsKaggle), axis=0) print("new Concatenated train_labels ", train_labels.shape )
Digit Recognizer
1,437,082
y_preds_base = np.array(preds_list_base ).mean(axis=0) y_preds_base<prepare_output>
model = models.Sequential() model.add(layers.Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(layers.Dropout(0.5)) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Conv2D(64,(3, 3), activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Conv2D(64,(3, 3), activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10, activation='softmax')) model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy']) print(model.summary() )
Digit Recognizer
1,437,082
y_preds_all = np.array(preds_list_all ).mean(axis=0) y_preds_all<prepare_output>
num_epochs = 30 BatchSize = 2048 model.fit(train_images, train_labels, epochs=num_epochs, batch_size=BatchSize) test_loss, test_acc = model.evaluate(test_imagesKeras, test_labelsKeras) print("_"*80) print("Accuracy on test ", test_acc )
Digit Recognizer
1,437,082
y_preds_final_iteration = np.array(preds_list_final_iteration ).mean(axis=0) y_preds_final_iteration<create_dataframe>
def build_model() : model = models.Sequential() model.add(layers.Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(layers.Dropout(0.5)) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Conv2D(64,(3, 3), activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Conv2D(64,(3, 3), activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10, activation='softmax')) model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy']) return model
Digit Recognizer
1,437,082
submission = pd.DataFrame({'id':test.id, 'target':y_preds_final_iteration} )<save_to_csv>
train_data = train_images train_targets = train_labels k = 4 num_val_samples = len(train_data)// k all_mae_histories = [] for i in range(k): print('processing fold #', i) val_data = train_data[i * num_val_samples:(i + 1)* num_val_samples] val_targets = train_targets[i * num_val_samples:(i + 1)* num_val_samples] partial_train_data = np.concatenate( [train_data[:i * num_val_samples], train_data[(i + 1)* num_val_samples:]], axis=0) partial_train_targets = np.concatenate( [train_targets[:i * num_val_samples], train_targets[(i + 1)* num_val_samples:]], axis=0) model = build_model() history = model.fit(partial_train_data, partial_train_targets, validation_data=(val_data, val_targets), epochs=num_epochs, batch_size=BatchSize, verbose=0) mae_history = history.history['acc'] all_mae_histories.append(mae_history) print("Done CV k-fold" )
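(The cell above collects per-fold accuracy histories but never aggregates them; a small follow-up sketch, assuming np, plt, num_epochs and all_mae_histories as defined above, averages the metric across folds per epoch:)

# Mean accuracy across the k folds for each epoch, then the learning curve.
average_history = [np.mean([fold[epoch] for fold in all_mae_histories]) for epoch in range(num_epochs)]
plt.plot(range(1, num_epochs + 1), average_history)
plt.xlabel('Epoch')
plt.ylabel('Mean fold accuracy')
plt.show()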
Digit Recognizer
1,437,082
submission.to_csv('submission.csv', index=False )<save_to_csv>
train_imagesFin = np.concatenate(( train_images,test_imagesKeras), axis=0) print("train_imagesFin ", train_imagesFin.shape) print("_"*50) train_labelsFin = np.concatenate(( train_labels,test_labelsKeras), axis=0) print("train_labelsFin ", train_labelsFin.shape )
Digit Recognizer
1,437,082
submission.to_csv('submission.csv', index=False )<load_from_csv>
model = build_model() model.fit(train_imagesFin, train_labelsFin, epochs=num_epochs, batch_size=BatchSize )
Digit Recognizer
1,437,082
pd.read_csv('submission.csv' )<train_model>
RawPred = model.predict(test_imagesKaggle) pred = [] numTest = RawPred.shape[0] for i in range(numTest): pred.append(np.argmax(RawPred[i])) predictions = np.array(pred )
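(The element-wise loop above is equivalent to a single vectorized call over the probability matrix:)

predictions = np.argmax(RawPred, axis=1)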
Digit Recognizer
1,437,082
<find_best_model_class><EOS>
sample_submission = pd.read_csv('../input/sample_submission.csv') result=pd.DataFrame({'ImageId':sample_submission.ImageId, 'Label':predictions}) result.to_csv("submission.csv",index=False) print(result )
Digit Recognizer
1,278,972
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<find_best_params>
training_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv') training_data.head()
Digit Recognizer
1,278,972
study = optuna.create_study(direction='minimize') optimize = partial(objective, X=X_train, y=y_train, model=LGBMRegressor) <init_hyperparams>
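(The cell above creates the study and the partially-applied objective but never launches the search; presumably a call along these lines follows — n_trials is a placeholder, and the objective's exact signature is assumed from the partial:)

study.optimize(optimize, n_trials=50)
print(study.best_params)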
x_train = training_data.drop('label', axis = 1) y_train = pd.DataFrame(data=training_data['label']) display(y_train.head()) display(x_train.head() )
Digit Recognizer
1,278,972
w1 = 0.2 w2 = 0.8<import_modules>
%matplotlib inline i= 1 imshow(x_train.iloc[i].values.reshape(( 28, 28))) print('This image corresponds to ', y_train.iloc[i] )
Digit Recognizer
1,278,972
import numpy as np import pandas as pd<load_from_csv>
cnn_model = Sequential() cnn_model.add(Conv2D(128,(3,3), padding='same', input_shape=(28,28,1), data_format='channels_last', activation='relu')) cnn_model.add(MaxPooling2D(pool_size=(2, 2))) cnn_model.add(Dropout(0.2)) cnn_model.add(Conv2D(128,(3,3), padding='same', activation='relu')) cnn_model.add(MaxPooling2D(pool_size=(2, 2))) cnn_model.add(Dropout(0.2)) cnn_model.add(Conv2D(256,(3,3), padding='same', activation='relu')) cnn_model.add(MaxPooling2D(pool_size=(2, 2))) cnn_model.add(Dropout(0.2)) cnn_model.add(Conv2D(256,(3,3), padding='valid', activation='relu')) cnn_model.add(Dropout(0.2)) cnn_model.add(Flatten()) cnn_model.add(Dense(units=10, activation='softmax')) cnn_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) print('The model was successfully created and compiled.') print('Shape of Layer Outputs:') for layer in cnn_model.layers: print(layer.name,': ',layer.output_shape )
Digit Recognizer
1,278,972
%%time df1 = pd.read_csv("../input/ensembling-starter-tps-feb-2021/submission.csv") df2 = pd.read_csv("../input/playground-series-february-21/submission_040.csv") blended_df = df1.copy(deep=True) blended_df['target'] = w1*df1['target'] + w2*df2['target'] print(blended_df.head() )<save_to_csv>
y_train_categorical = to_categorical(y_train, num_classes=10) reshaped_x = x_train.values.reshape(x_train.shape[0],28,28,1)/ 255 print(reshaped_x.shape) print(y_train_categorical.shape) cnn_model.fit(x=reshaped_x, y=y_train_categorical, batch_size=1000, epochs=32, verbose=1, validation_split=0.2 )
Digit Recognizer
1,278,972
blended_df.to_csv("blended_df.csv", index=None )<import_modules>
reshaped_test_data = test_data.values.reshape(test_data.shape[0],28,28,1)/ 255 predictions = cnn_model.predict(reshaped_test_data) display(predictions )
Digit Recognizer
1,278,972
import plotly.express as px import plotly.graph_objects as go import plotly.figure_factory as ff from plotly.subplots import make_subplots import matplotlib.pyplot as plt from colorama import Fore from pandas_profiling import ProfileReport import seaborn as sns from sklearn import metrics from scipy import stats import math from tqdm.notebook import tqdm from copy import deepcopy from sklearn.ensemble import RandomForestRegressor from xgboost import XGBRegressor from lightgbm import LGBMRegressor import lightgbm as lgb from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score import optuna from optuna import Trial, visualization from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score,confusion_matrix from sklearn.metrics import accuracy_score, mean_squared_error<load_from_csv>
predictions_formatted = np.argmax(predictions, axis=1) display(predictions_formatted )
Digit Recognizer
1,278,972
<concatenate><EOS>
submission = pd.DataFrame({'ImageId': np.arange(1,28001), 'Label': predictions_formatted}) submission.to_csv('submission_4.csv', index=False) print('Done' )
Digit Recognizer
480,900
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
%matplotlib inline print(os.path.dirname(os.getcwd())+':', os.listdir(os.path.dirname(os.getcwd()))); print(os.getcwd() +':', os.listdir(os.getcwd())) ;
Digit Recognizer
480,900
def modify_df(df): df['cat4'] = df['cat4'].apply(lambda x: x if x == 'B' else 'Z') df['cat5'] = df['cat5'].apply(lambda x: x if x in ['B', 'D'] else 'Z') df['cat6'] = df['cat6'].apply(lambda x: x if x == 'A' else 'Z') df['cat7'] = df['cat7'].apply(lambda x: x if x in ['E', 'D'] else 'Z') df['cat8'] = df['cat8'].apply(lambda x: x if x in ['E', 'C', 'G', 'A'] else 'Z') df['cont001'] = df['cont8'] * df['cont0'] df['cont002'] = df['cont9'] * df['cont0'] df['cont003'] = df['cont9'] * df['cont5'] df['cont004'] = df['cont8'] * df['cont5'] df['cont005'] = df['cont2'] * df['cont4'] df['cont006'] = df['cont1'] * df['cont3'] df['cont007'] = df['cont13'] * df['cont1'] return df mod_train_df = modify_df(train_df.copy()) mod_test_df = modify_df(test_df.copy() )<categorify>
if os.path.isfile('../input/train.csv'): data_df = pd.read_csv('../input/train.csv') print('train.csv loaded: data_df({0[0]},{0[1]})'.format(data_df.shape)) elif os.path.isfile('data/train.csv'): data_df = pd.read_csv('data/train.csv') print('train.csv loaded: data_df({0[0]},{0[1]})'.format(data_df.shape)) else: print('Error: train.csv not found') print('') print(data_df.isnull().any().describe()) print('') print('distinct labels ', data_df['label'].unique()) print('') print(data_df['label'].value_counts() )
Digit Recognizer
480,900
for feature in categorical_columns: le = LabelEncoder() le.fit(train_df[feature]) train_df[feature] = le.transform(train_df[feature]) test_df[feature] = le.transform(test_df[feature]) for feature in categorical_columns: le = LabelEncoder() le.fit(mod_train_df[feature]) mod_train_df[feature] = le.transform(mod_train_df[feature]) mod_test_df[feature] = le.transform(mod_test_df[feature] )<split>
def normalize_data(data): data = data / data.max() return data def dense_to_one_hot(labels_dense, num_classes): num_labels = labels_dense.shape[0] index_offset = np.arange(num_labels)* num_classes labels_one_hot = np.zeros(( num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel() ] = 1 return labels_one_hot def one_hot_to_dense(labels_one_hot): return np.argmax(labels_one_hot,1) def accuracy_from_dense_labels(y_target, y_pred): y_target = y_target.reshape(-1,) y_pred = y_pred.reshape(-1,) return np.mean(y_target == y_pred) def accuracy_from_one_hot_labels(y_target, y_pred): y_target = one_hot_to_dense(y_target ).reshape(-1,) y_pred = one_hot_to_dense(y_pred ).reshape(-1,) return np.mean(y_target == y_pred) x_train_valid = data_df.iloc[:,1:].values.reshape(-1,28,28,1) x_train_valid = x_train_valid.astype(np.float) x_train_valid = normalize_data(x_train_valid) image_width = image_height = 28 image_size = 784 y_train_valid_labels = data_df.iloc[:,0].values labels_count = np.unique(y_train_valid_labels ).shape[0]; plt.figure(figsize=(15,9)) for i in range(50): plt.subplot(5,10,1+i) plt.title(y_train_valid_labels[i]) plt.imshow(x_train_valid[i].reshape(28,28), cmap=cm.binary) y_train_valid = dense_to_one_hot(y_train_valid_labels, labels_count ).astype(np.uint8) y_valid_pred = {} y_train_pred = {} y_test_pred = {} train_loss, valid_loss = {}, {} train_acc, valid_acc = {}, {} print('x_train_valid.shape = ', x_train_valid.shape) print('y_train_valid_labels.shape = ', y_train_valid_labels.shape) print('image_size = ', image_size) print('image_width = ', image_width) print('image_height = ', image_height) print('labels_count = ', labels_count)
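(A quick worked example of the dense_to_one_hot helper above, illustrative only:)

dense_to_one_hot(np.array([2, 0]), 3)
# array([[0., 0., 1.],
#        [1., 0., 0.]])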
Digit Recognizer
480,900
x = train_df[feature_cols] y = train_df['target'] feature_cols_mod = mod_train_df.drop(['id', 'target'], axis=1 ).columns xmod, ymod = mod_train_df[feature_cols_mod], mod_train_df['target'] x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42 )<train_model>
def generate_images(imgs): image_generator = keras.preprocessing.image.ImageDataGenerator( rotation_range = 10, width_shift_range = 0.1 , height_shift_range = 0.1, zoom_range = 0.1) imgs = image_generator.flow(imgs.copy() , np.zeros(len(imgs)) , batch_size=len(imgs), shuffle = False ).next() return imgs[0] fig,axs = plt.subplots(5,10, figsize=(15,9)) for i in range(5): n = np.random.randint(0,x_train_valid.shape[0]-2) axs[i,0].imshow(x_train_valid[n:n+1].reshape(28,28),cmap=cm.binary) for j in range(1,10): axs[i,j].imshow(generate_images(x_train_valid[n:n+1] ).reshape(28,28), cmap=cm.binary )
Digit Recognizer
480,900
clf = XGBRegressor(random_state=42, tree_method='gpu_hist') clf.fit(x_train, y_train )<compute_train_metric>
logreg = sklearn.linear_model.LogisticRegression(verbose=0, solver='lbfgs', multi_class='multinomial') decision_tree = sklearn.tree.DecisionTreeClassifier() extra_trees = sklearn.ensemble.ExtraTreesClassifier(verbose=0) gradient_boost = sklearn.ensemble.GradientBoostingClassifier(verbose=0) random_forest = sklearn.ensemble.RandomForestClassifier(verbose=0) gaussianNB = sklearn.naive_bayes.GaussianNB() base_models = {'logreg': logreg, 'extra_trees': extra_trees, 'gradient_boost': gradient_boost, 'random_forest': random_forest, 'decision_tree': decision_tree, 'gaussianNB': gaussianNB} take_models = ['logreg','random_forest','extra_trees'] for mn in take_models: train_acc[mn] = [] valid_acc[mn] = [] cv_num = 10 kfold = sklearn.model_selection.KFold(cv_num, shuffle=True, random_state=123) for i,(train_index, valid_index)in enumerate(kfold.split(x_train_valid)) : start = datetime.datetime.now() ; x_train = x_train_valid[train_index].reshape(-1,784) y_train = y_train_valid[train_index] x_valid = x_train_valid[valid_index].reshape(-1,784) y_valid = y_train_valid[valid_index] for mn in take_models: model = sklearn.base.clone(base_models[mn]) model.fit(x_train, one_hot_to_dense(y_train)) y_train_pred[mn] = model.predict_proba(x_train) y_valid_pred[mn] = model.predict_proba(x_valid) train_acc[mn].append(accuracy_from_one_hot_labels(y_train_pred[mn], y_train)) valid_acc[mn].append(accuracy_from_one_hot_labels(y_valid_pred[mn], y_valid)) print(i,': '+mn+' train/valid accuracy = %.3f/%.3f'%(train_acc[mn][-1], valid_acc[mn][-1])) if False: break; print(mn+': averaged train/valid accuracy = %.3f/%.3f'%(np.mean(train_acc[mn]), np.mean(valid_acc[mn])))
Digit Recognizer
480,900
predictions = clf.predict(x_test) score_rmse = math.sqrt(mean_squared_error(y_test, predictions)) print(Fore.GREEN + 'Base XGBoost RMSE: {}'.format(score_rmse))<predict_on_test>
class nn_class: def __init__(self, nn_name = 'nn_1'): self.s_f_conv1 = 3; self.n_f_conv1 = 36; self.s_f_conv2 = 3; self.n_f_conv2 = 36; self.s_f_conv3 = 3; self.n_f_conv3 = 36; self.n_n_fc1 = 576; self.mb_size = 50 self.keep_prob = 0.33 self.learn_rate_array = [10*1e-4, 7.5*1e-4, 5*1e-4, 2.5*1e-4, 1*1e-4, 1*1e-4, 1*1e-4,0.75*1e-4, 0.5*1e-4, 0.25*1e-4, 0.1*1e-4, 0.1*1e-4, 0.075*1e-4,0.050*1e-4, 0.025*1e-4, 0.01*1e-4, 0.0075*1e-4, 0.0050*1e-4,0.0025*1e-4,0.001*1e-4] self.learn_rate_step_size = 3 self.learn_rate = self.learn_rate_array[0] self.learn_rate_pos = 0 self.index_in_epoch = 0 self.current_epoch = 0 self.log_step = 0.2 self.n_log_step = 0 self.use_tb_summary = False self.use_tf_saver = False self.nn_name = nn_name self.perm_array = np.array([]) def next_mini_batch(self): start = self.index_in_epoch self.index_in_epoch += self.mb_size self.current_epoch += self.mb_size/len(self.x_train) if not len(self.perm_array)== len(self.x_train): self.perm_array = np.arange(len(self.x_train)) if start == 0: np.random.shuffle(self.perm_array) if self.index_in_epoch > self.x_train.shape[0]: np.random.shuffle(self.perm_array) start = 0 self.index_in_epoch = self.mb_size if self.train_on_augmented_data: self.x_train_aug = normalize_data(self.generate_images(self.x_train)) self.y_train_aug = self.y_train end = self.index_in_epoch if self.train_on_augmented_data: x_tr = self.x_train_aug[self.perm_array[start:end]] y_tr = self.y_train_aug[self.perm_array[start:end]] else: x_tr = self.x_train[self.perm_array[start:end]] y_tr = self.y_train[self.perm_array[start:end]] return x_tr, y_tr def generate_images(self, imgs): print('generate new set of images') image_generator = keras.preprocessing.image.ImageDataGenerator( rotation_range = 10, width_shift_range = 0.1 , height_shift_range = 0.1, zoom_range = 0.1) imgs = image_generator.flow(imgs.copy() , np.zeros(len(imgs)) , batch_size=len(imgs), shuffle = False ).next() return imgs[0] def weight_variable(self, shape, name = None): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial, name = name) def bias_variable(self, shape, name = None): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial, name = name) def conv2d(self, x, W, name = None): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name = name) def max_pool_2x2(self, x, name = None): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name = name) def summary_variable(self, var, var_name): with tf.name_scope(var_name): mean = tf.reduce_mean(var) stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('mean', mean) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var) def create_graph(self): tf.reset_default_graph() self.x_data_tf = tf.placeholder(dtype=tf.float32, shape=[None,28,28,1], name='x_data_tf') self.y_data_tf = tf.placeholder(dtype=tf.float32, shape=[None,10], name='y_data_tf') self.W_conv1_tf = self.weight_variable([self.s_f_conv1, self.s_f_conv1, 1, self.n_f_conv1], name = 'W_conv1_tf') self.b_conv1_tf = self.bias_variable([self.n_f_conv1], name = 'b_conv1_tf') self.h_conv1_tf = tf.nn.relu(self.conv2d(self.x_data_tf, self.W_conv1_tf)+ self.b_conv1_tf, name = 'h_conv1_tf') self.h_pool1_tf = self.max_pool_2x2(self.h_conv1_tf, name = 'h_pool1_tf') self.W_conv2_tf = self.weight_variable([self.s_f_conv2, self.s_f_conv2, self.n_f_conv1, self.n_f_conv2], name = 'W_conv2_tf') self.b_conv2_tf 
= self.bias_variable([self.n_f_conv2], name = 'b_conv2_tf') self.h_conv2_tf = tf.nn.relu(self.conv2d(self.h_pool1_tf, self.W_conv2_tf)+ self.b_conv2_tf, name ='h_conv2_tf') self.h_pool2_tf = self.max_pool_2x2(self.h_conv2_tf, name = 'h_pool2_tf') self.W_conv3_tf = self.weight_variable([self.s_f_conv3, self.s_f_conv3, self.n_f_conv2, self.n_f_conv3], name = 'W_conv3_tf') self.b_conv3_tf = self.bias_variable([self.n_f_conv3], name = 'b_conv3_tf') self.h_conv3_tf = tf.nn.relu(self.conv2d(self.h_pool2_tf, self.W_conv3_tf)+ self.b_conv3_tf, name = 'h_conv3_tf') self.h_pool3_tf = self.max_pool_2x2(self.h_conv3_tf, name = 'h_pool3_tf') self.W_fc1_tf = self.weight_variable([4*4*self.n_f_conv3,self.n_n_fc1], name = 'W_fc1_tf') self.b_fc1_tf = self.bias_variable([self.n_n_fc1], name = 'b_fc1_tf') self.h_pool3_flat_tf = tf.reshape(self.h_pool3_tf, [-1,4*4*self.n_f_conv3], name = 'h_pool3_flat_tf') self.h_fc1_tf = tf.nn.relu(tf.matmul(self.h_pool3_flat_tf, self.W_fc1_tf)+ self.b_fc1_tf, name = 'h_fc1_tf') self.keep_prob_tf = tf.placeholder(dtype=tf.float32, name = 'keep_prob_tf') self.h_fc1_drop_tf = tf.nn.dropout(self.h_fc1_tf, self.keep_prob_tf, name = 'h_fc1_drop_tf') self.W_fc2_tf = self.weight_variable([self.n_n_fc1, 10], name = 'W_fc2_tf') self.b_fc2_tf = self.bias_variable([10], name = 'b_fc2_tf') self.z_pred_tf = tf.add(tf.matmul(self.h_fc1_drop_tf, self.W_fc2_tf), self.b_fc2_tf, name = 'z_pred_tf') self.cross_entropy_tf = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( labels=self.y_data_tf, logits=self.z_pred_tf), name = 'cross_entropy_tf') self.learn_rate_tf = tf.placeholder(dtype=tf.float32, name="learn_rate_tf") self.train_step_tf = tf.train.AdamOptimizer(self.learn_rate_tf ).minimize( self.cross_entropy_tf, name = 'train_step_tf') self.y_pred_proba_tf = tf.nn.softmax(self.z_pred_tf, name='y_pred_proba_tf') self.y_pred_correct_tf = tf.equal(tf.argmax(self.y_pred_proba_tf, 1), tf.argmax(self.y_data_tf, 1), name = 'y_pred_correct_tf') self.accuracy_tf = tf.reduce_mean(tf.cast(self.y_pred_correct_tf, dtype=tf.float32), name = 'accuracy_tf') self.train_loss_tf = tf.Variable(np.array([]), dtype=tf.float32, name='train_loss_tf', validate_shape = False) self.valid_loss_tf = tf.Variable(np.array([]), dtype=tf.float32, name='valid_loss_tf', validate_shape = False) self.train_acc_tf = tf.Variable(np.array([]), dtype=tf.float32, name='train_acc_tf', validate_shape = False) self.valid_acc_tf = tf.Variable(np.array([]), dtype=tf.float32, name='valid_acc_tf', validate_shape = False) num_weights =(self.s_f_conv1**2*self.n_f_conv1 + self.s_f_conv2**2*self.n_f_conv1*self.n_f_conv2 + self.s_f_conv3**2*self.n_f_conv2*self.n_f_conv3 + 4*4*self.n_f_conv3*self.n_n_fc1 + self.n_n_fc1*10) num_biases = self.n_f_conv1 + self.n_f_conv2 + self.n_f_conv3 + self.n_n_fc1 print('num_weights =', num_weights) print('num_biases =', num_biases) return None def attach_summary(self, sess): self.use_tb_summary = True self.summary_variable(self.W_conv1_tf, 'W_conv1_tf') self.summary_variable(self.b_conv1_tf, 'b_conv1_tf') self.summary_variable(self.W_conv2_tf, 'W_conv2_tf') self.summary_variable(self.b_conv2_tf, 'b_conv2_tf') self.summary_variable(self.W_conv3_tf, 'W_conv3_tf') self.summary_variable(self.b_conv3_tf, 'b_conv3_tf') self.summary_variable(self.W_fc1_tf, 'W_fc1_tf') self.summary_variable(self.b_fc1_tf, 'b_fc1_tf') self.summary_variable(self.W_fc2_tf, 'W_fc2_tf') self.summary_variable(self.b_fc2_tf, 'b_fc2_tf') tf.summary.scalar('cross_entropy_tf', self.cross_entropy_tf) tf.summary.scalar('accuracy_tf', 
self.accuracy_tf) self.merged = tf.summary.merge_all() timestamp = datetime.datetime.now().strftime('%d-%m-%Y_%H-%M-%S') filepath = os.path.join(os.getcwd() , 'logs',(self.nn_name+'_'+timestamp)) self.train_writer = tf.summary.FileWriter(os.path.join(filepath,'train'), sess.graph) self.valid_writer = tf.summary.FileWriter(os.path.join(filepath,'valid'), sess.graph) def attach_saver(self): self.use_tf_saver = True self.saver_tf = tf.train.Saver() def train_graph(self, sess, x_train, y_train, x_valid, y_valid, n_epoch = 1, train_on_augmented_data = False): self.train_on_augmented_data = train_on_augmented_data self.x_train = x_train self.y_train = y_train self.x_valid = x_valid self.y_valid = y_valid if self.train_on_augmented_data: print('generate new set of images') self.x_train_aug = normalize_data(self.generate_images(self.x_train)) self.y_train_aug = self.y_train mb_per_epoch = self.x_train.shape[0]/self.mb_size train_loss, train_acc, valid_loss, valid_acc = [],[],[],[] start = datetime.datetime.now() ; print(datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S'),': start training') print('learnrate = ',self.learn_rate,', n_epoch = ', n_epoch, ', mb_size = ', self.mb_size) for i in range(int(n_epoch*mb_per_epoch)+1): self.learn_rate_pos = int(self.current_epoch // self.learn_rate_step_size) if not self.learn_rate == self.learn_rate_array[self.learn_rate_pos]: self.learn_rate = self.learn_rate_array[self.learn_rate_pos] print(datetime.datetime.now() -start,': set learn rate to %.6f'%self.learn_rate) x_batch, y_batch = self.next_mini_batch() sess.run(self.train_step_tf, feed_dict={self.x_data_tf: x_batch, self.y_data_tf: y_batch, self.keep_prob_tf: self.keep_prob, self.learn_rate_tf: self.learn_rate}) if i%int(self.log_step*mb_per_epoch)== 0 or i == int(n_epoch*mb_per_epoch): self.n_log_step += 1 feed_dict_train = { self.x_data_tf: self.x_train[self.perm_array[:len(self.x_valid)]], self.y_data_tf: self.y_train[self.perm_array[:len(self.y_valid)]], self.keep_prob_tf: 1.0} feed_dict_valid = {self.x_data_tf: self.x_valid, self.y_data_tf: self.y_valid, self.keep_prob_tf: 1.0} if self.use_tb_summary: train_summary = sess.run(self.merged, feed_dict = feed_dict_train) valid_summary = sess.run(self.merged, feed_dict = feed_dict_valid) self.train_writer.add_summary(train_summary, self.n_log_step) self.valid_writer.add_summary(valid_summary, self.n_log_step) train_loss.append(sess.run(self.cross_entropy_tf, feed_dict = feed_dict_train)) train_acc.append(self.accuracy_tf.eval(session = sess, feed_dict = feed_dict_train)) valid_loss.append(sess.run(self.cross_entropy_tf, feed_dict = feed_dict_valid)) valid_acc.append(self.accuracy_tf.eval(session = sess, feed_dict = feed_dict_valid)) print('%.2f epoch: train/val loss = %.4f/%.4f, train/val acc = %.4f/%.4f'%( self.current_epoch, train_loss[-1], valid_loss[-1], train_acc[-1], valid_acc[-1])) tl_c = np.concatenate([self.train_loss_tf.eval(session=sess), train_loss], axis = 0) vl_c = np.concatenate([self.valid_loss_tf.eval(session=sess), valid_loss], axis = 0) ta_c = np.concatenate([self.train_acc_tf.eval(session=sess), train_acc], axis = 0) va_c = np.concatenate([self.valid_acc_tf.eval(session=sess), valid_acc], axis = 0) sess.run(tf.assign(self.train_loss_tf, tl_c, validate_shape = False)) sess.run(tf.assign(self.valid_loss_tf, vl_c , validate_shape = False)) sess.run(tf.assign(self.train_acc_tf, ta_c , validate_shape = False)) sess.run(tf.assign(self.valid_acc_tf, va_c , validate_shape = False)) print('running time for training: ', 
datetime.datetime.now() - start) return None def save_model(self, sess): if self.use_tf_saver: filepath = os.path.join(os.getcwd() , self.nn_name) self.saver_tf.save(sess, filepath) if self.use_tb_summary: self.train_writer.close() self.valid_writer.close() return None def forward(self, sess, x_data): y_pred_proba = self.y_pred_proba_tf.eval(session = sess, feed_dict = {self.x_data_tf: x_data, self.keep_prob_tf: 1.0}) return y_pred_proba def load_tensors(self, graph): self.x_data_tf = graph.get_tensor_by_name("x_data_tf:0") self.y_data_tf = graph.get_tensor_by_name("y_data_tf:0") self.W_conv1_tf = graph.get_tensor_by_name("W_conv1_tf:0") self.W_conv2_tf = graph.get_tensor_by_name("W_conv2_tf:0") self.W_conv3_tf = graph.get_tensor_by_name("W_conv3_tf:0") self.W_fc1_tf = graph.get_tensor_by_name("W_fc1_tf:0") self.W_fc2_tf = graph.get_tensor_by_name("W_fc2_tf:0") self.b_conv1_tf = graph.get_tensor_by_name("b_conv1_tf:0") self.b_conv2_tf = graph.get_tensor_by_name("b_conv2_tf:0") self.b_conv3_tf = graph.get_tensor_by_name("b_conv3_tf:0") self.b_fc1_tf = graph.get_tensor_by_name("b_fc1_tf:0") self.b_fc2_tf = graph.get_tensor_by_name("b_fc2_tf:0") self.h_conv1_tf = graph.get_tensor_by_name('h_conv1_tf:0') self.h_pool1_tf = graph.get_tensor_by_name('h_pool1_tf:0') self.h_conv2_tf = graph.get_tensor_by_name('h_conv2_tf:0') self.h_pool2_tf = graph.get_tensor_by_name('h_pool2_tf:0') self.h_conv3_tf = graph.get_tensor_by_name('h_conv3_tf:0') self.h_pool3_tf = graph.get_tensor_by_name('h_pool3_tf:0') self.h_fc1_tf = graph.get_tensor_by_name('h_fc1_tf:0') self.z_pred_tf = graph.get_tensor_by_name('z_pred_tf:0') self.learn_rate_tf = graph.get_tensor_by_name("learn_rate_tf:0") self.keep_prob_tf = graph.get_tensor_by_name("keep_prob_tf:0") self.cross_entropy_tf = graph.get_tensor_by_name('cross_entropy_tf:0') self.train_step_tf = graph.get_operation_by_name('train_step_tf') self.z_pred_tf = graph.get_tensor_by_name('z_pred_tf:0') self.y_pred_proba_tf = graph.get_tensor_by_name("y_pred_proba_tf:0") self.y_pred_correct_tf = graph.get_tensor_by_name('y_pred_correct_tf:0') self.accuracy_tf = graph.get_tensor_by_name('accuracy_tf:0') self.train_loss_tf = graph.get_tensor_by_name("train_loss_tf:0") self.train_acc_tf = graph.get_tensor_by_name("train_acc_tf:0") self.valid_loss_tf = graph.get_tensor_by_name("valid_loss_tf:0") self.valid_acc_tf = graph.get_tensor_by_name("valid_acc_tf:0") return None def get_loss(self, sess): train_loss = self.train_loss_tf.eval(session = sess) valid_loss = self.valid_loss_tf.eval(session = sess) return train_loss, valid_loss def get_accuracy(self, sess): train_acc = self.train_acc_tf.eval(session = sess) valid_acc = self.valid_acc_tf.eval(session = sess) return train_acc, valid_acc def get_weights(self, sess): W_conv1 = self.W_conv1_tf.eval(session = sess) W_conv2 = self.W_conv2_tf.eval(session = sess) W_conv3 = self.W_conv3_tf.eval(session = sess) W_fc1_tf = self.W_fc1_tf.eval(session = sess) W_fc2_tf = self.W_fc2_tf.eval(session = sess) return W_conv1, W_conv2, W_conv3, W_fc1_tf, W_fc2_tf def get_biases(self, sess): b_conv1 = self.b_conv1_tf.eval(session = sess) b_conv2 = self.b_conv2_tf.eval(session = sess) b_conv3 = self.b_conv3_tf.eval(session = sess) b_fc1_tf = self.b_fc1_tf.eval(session = sess) b_fc2_tf = self.b_fc2_tf.eval(session = sess) return b_conv1, b_conv2, b_conv3, b_fc1_tf, b_fc2_tf def load_session_from_file(self, filename): tf.reset_default_graph() filepath = os.path.join(os.getcwd() , filename + '.meta') saver = tf.train.import_meta_graph(filepath) 
print(filepath) sess = tf.Session() saver.restore(sess, os.path.join(os.getcwd() , filename)) graph = tf.get_default_graph() self.load_tensors(graph) return sess def get_activations(self, sess, x_data): feed_dict = {self.x_data_tf: x_data, self.keep_prob_tf: 1.0} h_conv1 = self.h_conv1_tf.eval(session = sess, feed_dict = feed_dict) h_pool1 = self.h_pool1_tf.eval(session = sess, feed_dict = feed_dict) h_conv2 = self.h_conv2_tf.eval(session = sess, feed_dict = feed_dict) h_pool2 = self.h_pool2_tf.eval(session = sess, feed_dict = feed_dict) h_conv3 = self.h_conv3_tf.eval(session = sess, feed_dict = feed_dict) h_pool3 = self.h_pool3_tf.eval(session = sess, feed_dict = feed_dict) h_fc1 = self.h_fc1_tf.eval(session = sess, feed_dict = feed_dict) h_fc2 = self.z_pred_tf.eval(session = sess, feed_dict = feed_dict) return h_conv1,h_pool1,h_conv2,h_pool2,h_conv3,h_pool3,h_fc1,h_fc2
Digit Recognizer
480,900
sub_xgb_base = clf.predict(test_df[feature_cols] )<train_model>
nn_name = ['tmp'] cv_num = 10 kfold = sklearn.model_selection.KFold(cv_num, shuffle=True, random_state=123) for i,(train_index, valid_index)in enumerate(kfold.split(x_train_valid)) : start = datetime.datetime.now() ; x_train = x_train_valid[train_index] y_train = y_train_valid[train_index] x_valid = x_train_valid[valid_index] y_valid = y_train_valid[valid_index] nn_graph = nn_class(nn_name = nn_name[i]) nn_graph.create_graph() nn_graph.attach_saver() with tf.Session() as sess: nn_graph.attach_summary(sess) sess.run(tf.global_variables_initializer()) nn_graph.train_graph(sess, x_train, y_train, x_valid, y_valid, n_epoch = 1.0) nn_graph.train_graph(sess, x_train, y_train, x_valid, y_valid, n_epoch = 14.0, train_on_augmented_data = True) nn_graph.save_model(sess) if True: break; print('total running time for training: ', datetime.datetime.now() - start)
Digit Recognizer
480,900
clf = LGBMRegressor(random_state=42, device='gpu') clf.fit(x_train, y_train )<compute_train_metric>
if False: !tensorboard --logdir=./logs
Digit Recognizer
480,900
predictions = clf.predict(x_test) score_rmse = math.sqrt(mean_squared_error(y_test, predictions)) print(Fore.GREEN + 'Base LGBM RMSE: {}'.format(score_rmse))<define_variables>
mn = nn_name[0] nn_graph = nn_class() sess = nn_graph.load_session_from_file(mn) W_conv1, W_conv2, W_conv3, _, _ = nn_graph.get_weights(sess) sess.close() print('W_conv1: min = ' + str(np.min(W_conv1)) + ' max = ' + str(np.max(W_conv1)) + ' mean = ' + str(np.mean(W_conv1)) + ' std = ' + str(np.std(W_conv1))) print('W_conv2: min = ' + str(np.min(W_conv2)) + ' max = ' + str(np.max(W_conv2)) + ' mean = ' + str(np.mean(W_conv2)) + ' std = ' + str(np.std(W_conv2))) print('W_conv3: min = ' + str(np.min(W_conv3)) + ' max = ' + str(np.max(W_conv3)) + ' mean = ' + str(np.mean(W_conv3)) + ' std = ' + str(np.std(W_conv3))) s_f_conv1 = nn_graph.s_f_conv1 s_f_conv2 = nn_graph.s_f_conv2 s_f_conv3 = nn_graph.s_f_conv3 W_conv1 = np.reshape(W_conv1,(s_f_conv1,s_f_conv1,1,6,6)) W_conv1 = np.transpose(W_conv1,(3,0,4,1,2)) W_conv1 = np.reshape(W_conv1,(s_f_conv1*6,s_f_conv1*6,1)) W_conv2 = np.reshape(W_conv2,(s_f_conv2,s_f_conv2,6,6,36)) W_conv2 = np.transpose(W_conv2,(2,0,3,1,4)) W_conv2 = np.reshape(W_conv2,(6*s_f_conv2,6*s_f_conv2,6,6)) W_conv2 = np.transpose(W_conv2,(2,0,3,1)) W_conv2 = np.reshape(W_conv2,(6*6*s_f_conv2,6*6*s_f_conv2)) W_conv3 = np.reshape(W_conv3,(s_f_conv3,s_f_conv3,6,6,36)) W_conv3 = np.transpose(W_conv3,(2,0,3,1,4)) W_conv3 = np.reshape(W_conv3,(6*s_f_conv3,6*s_f_conv3,6,6)) W_conv3 = np.transpose(W_conv3,(2,0,3,1)) W_conv3 = np.reshape(W_conv3,(6*6*s_f_conv3,6*6*s_f_conv3)) plt.figure(figsize=(15,5)) plt.subplot(1,3,1) plt.gca().set_xticks(np.arange(-0.5, s_f_conv1*6, s_f_conv1), minor = False); plt.gca().set_yticks(np.arange(-0.5, s_f_conv1*6, s_f_conv1), minor = False); plt.grid(which = 'minor', color='b', linestyle='-', linewidth=1) plt.title('W_conv1 ' + str(W_conv1.shape)) plt.colorbar(plt.imshow(W_conv1[:,:,0], cmap=cm.binary)) ; plt.subplot(1,3,2) plt.gca().set_xticks(np.arange(-0.5, 6*6*s_f_conv2, 6*s_f_conv2), minor = False); plt.gca().set_yticks(np.arange(-0.5, 6*6*s_f_conv2, 6*s_f_conv2), minor = False); plt.grid(which = 'minor', color='b', linestyle='-', linewidth=1) plt.title('W_conv2 ' + str(W_conv2.shape)) plt.colorbar(plt.imshow(W_conv2[:,:], cmap=cm.binary)) ; plt.subplot(1,3,3) plt.gca().set_xticks(np.arange(-0.5, 6*6*s_f_conv3, 6*s_f_conv3), minor = False); plt.gca().set_yticks(np.arange(-0.5, 6*6*s_f_conv3, 6*s_f_conv3), minor = False); plt.grid(which = 'minor', color='b', linestyle='-', linewidth=1) plt.title('W_conv3 ' + str(W_conv3.shape)) plt.colorbar(plt.imshow(W_conv3[:,:], cmap=cm.binary)) ;
Digit Recognizer
480,900
train_oof = np.zeros(( 300000,)) test_preds = 0 train_oof.shape<train_model>
mn = nn_name[0] nn_graph = nn_class() sess = nn_graph.load_session_from_file(mn) y_valid_pred[mn] = nn_graph.forward(sess, x_valid) sess.close() y_valid_pred_label = one_hot_to_dense(y_valid_pred[mn]) y_valid_label = one_hot_to_dense(y_valid) y_val_false_index = [] for i in range(y_valid_label.shape[0]): if y_valid_pred_label[i] != y_valid_label[i]: y_val_false_index.append(i) print(len(y_val_false_index), 'misclassified validation images') plt.figure(figsize=(10,15)) for j in range(0,5): for i in range(0,10): if j*10+i<len(y_val_false_index): plt.subplot(10,10,j*10+i+1) plt.title('%d/%d'%(y_valid_label[y_val_false_index[j*10+i]], y_valid_pred_label[y_val_false_index[j*10+i]])) plt.imshow(x_valid[y_val_false_index[j*10+i]].reshape(28,28),cmap=cm.binary )
Digit Recognizer
480,900
NUM_FOLDS = 5 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0) for f,(train_ind, val_ind)in tqdm(enumerate(kf.split(x, y))): tmp_train_df, tmp_val_df = x.iloc[train_ind][feature_cols], x.iloc[val_ind][feature_cols] train_target, val_target = y[train_ind], y[val_ind] model = LGBMRegressor(random_state=42, device='gpu') model.fit(tmp_train_df, train_target, eval_set=[(tmp_val_df, val_target)], verbose=False) temp_oof = model.predict(tmp_val_df) temp_test = model.predict(test_df[feature_cols]) train_oof[val_ind] = temp_oof test_preds += temp_test/NUM_FOLDS print(mean_squared_error(temp_oof, val_target, squared=False))<save_to_csv>
if os.path.isfile('../input/test.csv'): test_df = pd.read_csv('../input/test.csv') print('test.csv loaded: test_df{0}'.format(test_df.shape)) elif os.path.isfile('data/test.csv'): test_df = pd.read_csv('data/test.csv') print('test.csv loaded: test_df{0}'.format(test_df.shape)) else: print('Error: test.csv not found') x_test = test_df.iloc[:,0:].values.reshape(-1,28,28,1) x_test = x_test.astype(np.float) x_test = normalize_data(x_test) print('x_test.shape = ', x_test.shape) y_test_pred = {} y_test_pred_labels = {}
Digit Recognizer
480,900
sub_df['target'] = test_preds sub_df.to_csv('submission_lgbm_cv.csv', index=False) sub_df.head() sub_lgbm_cv = test_preds<init_hyperparams>
if False: take_models = ['nn0','nn1','nn2','nn3','nn4','nn5','nn6','nn7','nn8','nn9'] kfold = sklearn.model_selection.KFold(len(take_models), shuffle=True, random_state = 123) x_train_meta = np.array([] ).reshape(-1,10) y_train_meta = np.array([] ).reshape(-1,10) x_test_meta = np.zeros(( x_test.shape[0], 10)) print('Out-of-folds predictions:') for i,(train_index, valid_index)in enumerate(kfold.split(x_train_valid)) : x_train = x_train_valid[train_index] y_train = y_train_valid[train_index] x_valid = x_train_valid[valid_index] y_valid = y_train_valid[valid_index] mn = take_models[i] nn_graph = nn_class() sess = nn_graph.load_session_from_file(mn) y_train_pred[mn] = nn_graph.forward(sess, x_train[:len(x_valid)]) y_valid_pred[mn] = nn_graph.forward(sess, x_valid) y_test_pred[mn] = nn_graph.forward(sess, x_test) sess.close() x_train_meta = np.concatenate([x_train_meta, y_valid_pred[mn]]) y_train_meta = np.concatenate([y_train_meta, y_valid]) x_test_meta += y_test_pred[mn] print(take_models[i],': train/valid accuracy = %.4f/%.4f'%( accuracy_from_one_hot_labels(y_train_pred[mn], y_train[:len(x_valid)]), accuracy_from_one_hot_labels(y_valid_pred[mn], y_valid))) if False: break; x_test_meta = x_test_meta/(i+1) y_test_pred['stacked_models'] = x_test_meta print('') print('Stacked models: valid accuracy = %.4f'%accuracy_from_one_hot_labels(x_train_meta, y_train_meta))
Digit Recognizer
480,900
xgb_params = { 'booster':'gbtree', 'n_estimators':20000, 'max_depth':5, 'eta':0.008, 'gamma':3.5, 'objective':'reg:squarederror', 'verbosity':0, 'subsample':0.75, 'colsample_bytree':0.35, 'reg_lambda':0.23, 'reg_alpha':0.52, 'scale_pos_weight':1, 'eval_metric':'rmse', 'seed': 42, 'tree_method':'gpu_hist', 'gpu_id':0 }<train_model>
if False: logreg = sklearn.linear_model.LogisticRegression(verbose=0, solver='lbfgs', multi_class='multinomial') take_meta_model = 'logreg' model = sklearn.base.clone(base_models[take_meta_model]) model.fit(x_train_meta, one_hot_to_dense(y_train_meta)) y_train_pred['meta_model'] = model.predict_proba(x_train_meta) y_test_pred['meta_model'] = model.predict_proba(x_test_meta) print('Meta model: train accuracy = %.4f'%accuracy_from_one_hot_labels(x_train_meta, y_train_pred['meta_model']))
Digit Recognizer
480,900
train_oof = np.zeros(( 300000,)) test_preds = 0 train_oof.shape NUM_FOLDS = 5 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=42) for f,(train_ind, val_ind)in tqdm(enumerate(kf.split(x, y))): tmp_train_df, tmp_val_df = x.iloc[train_ind][feature_cols], x.iloc[val_ind][feature_cols] train_target, val_target = y[train_ind], y[val_ind] model = XGBRegressor(**xgb_params) model.fit( tmp_train_df, train_target, eval_set=[(tmp_val_df, val_target)], early_stopping_rounds = 50, verbose = False ) temp_oof = model.predict(tmp_val_df) temp_test = model.predict(test_df[feature_cols]) train_oof[val_ind] = temp_oof test_preds += temp_test/NUM_FOLDS print(mean_squared_error(temp_oof, val_target, squared=False))<save_to_csv>
if True: mn = nn_name[0] nn_graph = nn_class() sess = nn_graph.load_session_from_file(mn) y_test_pred = {} y_test_pred_labels = {} kfold = sklearn.model_selection.KFold(40, shuffle=False) for i,(train_index, valid_index)in enumerate(kfold.split(x_test)) : if i==0: y_test_pred[mn] = nn_graph.forward(sess, x_test[valid_index]) else: y_test_pred[mn] = np.concatenate([y_test_pred[mn], nn_graph.forward(sess, x_test[valid_index])]) sess.close()
Digit Recognizer
480,900
<init_hyperparams><EOS>
mn = nn_name[0] y_test_pred_labels[mn] = one_hot_to_dense(y_test_pred[mn]) print(mn+': y_test_pred_labels[mn].shape = ', y_test_pred_labels[mn].shape) unique, counts = np.unique(y_test_pred_labels[mn], return_counts=True) print(dict(zip(unique, counts))) np.savetxt('submission.csv', np.c_[range(1,len(x_test)+1), y_test_pred_labels[mn]], delimiter=',', header = 'ImageId,Label', comments = '', fmt='%d') print('submission.csv completed' )
Digit Recognizer
1,195,173
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_model>
print(os.listdir("../input"))
Digit Recognizer
1,195,173
train_oof = np.zeros(( 300000,)) test_preds = 0 train_oof.shape NUM_FOLDS = 5 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=42) for f,(train_ind, val_ind)in tqdm(enumerate(kf.split(xmod, ymod))): tmp_train_df, tmp_val_df = xmod.iloc[train_ind][feature_cols_mod], xmod.iloc[val_ind][feature_cols_mod] train_target, val_target = ymod[train_ind], ymod[val_ind] model = XGBRegressor(**xgb_params) model.fit( tmp_train_df, train_target, eval_set=[(tmp_val_df, val_target)], early_stopping_rounds = 50, verbose = False ) temp_oof = model.predict(tmp_val_df) temp_test = model.predict(mod_test_df[feature_cols_mod]) train_oof[val_ind] = temp_oof test_preds += temp_test/NUM_FOLDS print(mean_squared_error(temp_oof, val_target, squared=False))<save_to_csv>
img_rows, img_cols = 28, 28
Digit Recognizer
1,195,173
sub_df['target'] = test_preds sub_df.to_csv('submission_xgb_mod_cv_optimized.csv', index=False) sub_df.head() sub_xgb_mod_cv_optimized = test_preds<split>
def load_dataset(train_path,test_path): global train,test,trainX,trainY,nb_classes train = pd.read_csv(train_path ).values test = pd.read_csv(test_path ).values print("Train Shape :",train.shape) trainX = train[:, 1:].reshape(train.shape[0], img_rows, img_cols, 1) trainX = trainX.astype(float) trainX /= 255.0 trainY = kutils.to_categorical(train[:, 0]) nb_classes = trainY.shape[1] print("TrainX Shape : ",trainX.shape) print("Trainy shape : ",trainY.shape) testX = test.reshape(test.shape[0], 28, 28, 1) testX = testX.astype(float) testX /= 255.0 return train,test,trainX,trainY,testX,nb_classes
Digit Recognizer
1,195,173
def objective(trial,data=x,target=y): train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.15,random_state=42) param = { 'device':'gpu', 'metric': 'rmse', 'random_state': 42, 'reg_lambda': trial.suggest_loguniform( 'reg_lambda', 1e-3, 10.0 ), 'reg_alpha': trial.suggest_loguniform( 'reg_alpha', 1e-3, 10.0 ), 'colsample_bytree': trial.suggest_categorical( 'colsample_bytree', [0.3,0.5,0.6,0.7,0.8,0.9,1.0] ), 'subsample': trial.suggest_categorical( 'subsample', [0.6,0.7,0.8,1.0] ), 'learning_rate': trial.suggest_categorical( 'learning_rate', [0.008,0.009,0.01,0.012,0.014,0.016,0.018, 0.02] ), 'n_estimators': trial.suggest_categorical( "n_estimators", [150, 200, 300, 3000] ), 'max_depth': trial.suggest_categorical( 'max_depth', [4,5,7,9,11,13,15,17,20] ), 'min_child_samples': trial.suggest_int( 'min_child_samples', 1, 300 ), 'num_leaves': trial.suggest_int( 'num_leaves', 15, 120 ), } model = LGBMRegressor(**param) model.fit(train_x,train_y,eval_set=[(test_x,test_y)], early_stopping_rounds=300, verbose=False) preds = model.predict(test_x) rmse = mean_squared_error(test_y, preds,squared=False) return rmse<train_model>
def createModel(inp_shape,nClasses): model = models.Sequential() model.add(Conv2D(32,(3, 3), padding='same', activation='relu', input_shape=inp_shape)) model.add(Conv2D(32,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64,(3, 3), padding='same', activation='relu')) model.add(Conv2D(64,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64,(3, 3), padding='same', activation='relu')) model.add(Conv2D(64,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(nClasses, activation='softmax')) optimizer1 = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer=optimizer1, loss='categorical_crossentropy', metrics=['accuracy']) return model
Digit Recognizer
1,195,173
study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=5) print('Number of finished trials:', len(study.trials)) print('Best trial:', study.best_trial.params )<find_best_params>
def submission(prediction): np.savetxt('mnist-submission.csv', np.c_[range(1,len(prediction)+1),prediction], delimiter=',', header = 'ImageId,Label', comments = '', fmt='%d' )
Digit Recognizer
1,195,173
study.best_params<compute_train_metric>
def show_classification_report(X_test,test): predicted_classes = model.predict_classes(X_test) y_true = test.iloc[:, 0] correct = np.nonzero(predicted_classes==y_true)[0] incorrect = np.nonzero(predicted_classes!=y_true)[0] target_names = ["Class {}".format(i)for i in range(num_classes)] print(classification_report(y_true, predicted_classes, target_names=target_names))
Digit Recognizer
1,195,173
best_params = { 'reg_lambda': 0.015979956459638782, 'reg_alpha': 9.103977313355028, 'colsample_bytree': 0.3, 'subsample': 1.0, 'learning_rate': 0.009, 'n_estimators': 3000, 'max_depth': 15, 'min_child_samples': 142, 'num_leaves': 84, 'random_state': 42, 'device': 'gpu', } clf = LGBMRegressor(**best_params) clf.fit(x_train, y_train) predictions = clf.predict(x_test) score_rmse = math.sqrt(mean_squared_error(y_test, predictions)) print(Fore.GREEN + 'Base LGBM RMSE: {}'.format(score_rmse))<save_to_csv>
train_path="../input/train.csv" test_path="../input/test.csv" train,test,trainX,trainY,testX,nb_classes=load_dataset(train_path,test_path) X_train, X_test, y_train, y_test = train_test_split(trainX,trainY,test_size=0.1, random_state=21) inp_shape=(28,28,1) model=createModel(inp_shape,nb_classes) imgaug=False batch_size=128 nb_epochs=30 learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001) if imgaug==True: datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(trainX) out = model.fit_generator(datagen.flow(trainX,trainY, batch_size=batch_size), epochs = nb_epochs, validation_data =(X_test,y_test), verbose = 2, steps_per_epoch=trainX.shape[0] // batch_size , callbacks=[learning_rate_reduction]) else: out=model.fit(trainX, trainY, batch_size=batch_size, epochs=nb_epochs, verbose=1, validation_data=(X_test, y_test)) yPred = model.predict_classes(testX) print("Predictions : ",yPred) submission(yPred) result_visualization(out)
Digit Recognizer
1,195,173
sub_preds = clf.predict(test_df[feature_cols]) sub_df['target'] = sub_preds sub_df.to_csv('submission_lgbm_optuna.csv', index=False) sub_df.head() sub_lgbm_optuna = sub_preds<init_hyperparams>
Digit Recognizer
3,148,616
lgbm_params = { "random_state": 2021, "metric": "rmse", "n_jobs": -1, "cat_feature": [x for x in range(len(categorical_columns)) ], "early_stopping_round": 150, "reg_alpha": 6.147694913504962, "reg_lambda": 0.002457826062076097, "colsample_bytree": 0.3, "learning_rate": 0.01, "max_depth": 30, "num_leaves": 100, "min_child_samples": 275, "n_estimators": 30000, "cat_smooth": 40.0, "max_bin": 512, "min_data_per_group": 100, "bagging_freq": 1, "bagging_fraction": 0.7, "cat_l2": 12.0, }<train_model>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv' )
Digit Recognizer
3,148,616
train_oof = np.zeros(( 300000,)) test_preds = 0 train_oof.shape NUM_FOLDS = 5 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=42) for f,(train_ind, val_ind)in tqdm(enumerate(kf.split(x, y))): tmp_train_df, tmp_val_df = x.iloc[train_ind][feature_cols], x.iloc[val_ind][feature_cols] train_target, val_target = y[train_ind], y[val_ind] model = LGBMRegressor(**lgbm_params) model.fit( tmp_train_df, train_target, eval_set=[(tmp_val_df, val_target)], early_stopping_rounds = 100, verbose=False ) temp_oof = model.predict(tmp_val_df) temp_test = model.predict(test_df[feature_cols]) train_oof[val_ind] = temp_oof test_preds += temp_test/NUM_FOLDS print(mean_squared_error(temp_oof, val_target, squared=False))<save_to_csv>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv' )
Digit Recognizer
3,148,616
sub_df['target'] = test_preds sub_df.to_csv('submission_lgbm_cv_optimized.csv', index=False) sub_df.head() sub_lgbm_cv_optimized = test_preds<train_model>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1 )
Digit Recognizer
3,148,616
train_oof = np.zeros(( 300000,)) test_preds = 0 train_oof.shape NUM_FOLDS = 5 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=42) for f,(train_ind, val_ind)in tqdm(enumerate(kf.split(xmod, ymod))): tmp_train_df, tmp_val_df = xmod.iloc[train_ind][feature_cols_mod], xmod.iloc[val_ind][feature_cols_mod] train_target, val_target = ymod[train_ind], ymod[val_ind] model = LGBMRegressor(**lgbm_params) model.fit( tmp_train_df, train_target, eval_set=[(tmp_val_df, val_target)], early_stopping_rounds = 100, verbose=False ) temp_oof = model.predict(tmp_val_df) temp_test = model.predict(mod_test_df[feature_cols_mod]) train_oof[val_ind] = temp_oof test_preds += temp_test/NUM_FOLDS print(mean_squared_error(temp_oof, val_target, squared=False))<save_to_csv>
X_train = X_train / 255.0 test = test / 255
Digit Recognizer
3,148,616
sub_df['target'] = test_preds sub_df.to_csv('submission_lgbm_mod_cv_optimized.csv', index=False) sub_df.head() sub_lgbm_mod_cv_optimized = test_preds<set_options>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2 )
Digit Recognizer
3,148,616
h2o.init()<split>
model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28, 1)) , keras.layers.Dense(128, activation=tf.nn.relu), keras.layers.Dense(10, activation=tf.nn.softmax) ] )
Digit Recognizer
3,148,616
train_hf = h2o.H2OFrame(train_df) test_hf = h2o.H2OFrame(test_df) predictors = list(feature_cols) response = 'target' train, valid = train_hf.split_frame(ratios=[.8], seed=1234 )<choose_model_class>
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,148,616
aml = H2OAutoML( max_models=20, max_runtime_secs=200, exclude_algos = ["DeepLearning", "DRF"], seed=42, )<train_model>
history = model.fit(X_train, Y_train, epochs=5 )
Digit Recognizer
3,148,616
aml.train(x=predictors, y=response, training_frame=train, validation_frame=valid )<compute_test_metric>
hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch hist.tail()
Digit Recognizer
3,148,616
print('The model performance in RMSE: {}'.format(aml.leader.rmse(valid=True))) print('The model performance in MAE: {}'.format(aml.leader.mae(valid=True)) )<predict_on_test>
test_loss, test_acc = model.evaluate(X_val, Y_val) print('Test accuracy:', test_acc )
Digit Recognizer
3,148,616
preds = aml.predict(test_hf ).as_data_frame() preds.head()<save_to_csv>
model = keras.Sequential([ tf.keras.layers.Conv2D(32,(3,3), padding='same', activation=tf.nn.relu, input_shape=(28, 28, 1)) , tf.keras.layers.Conv2D(32,(3,3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPooling2D(( 2, 2), strides=2), tf.keras.layers.Dropout(0.25), tf.keras.layers.Conv2D(64,(3,3), padding='same', activation=tf.nn.relu), tf.keras.layers.Conv2D(64,(3,3), padding='same', activation=tf.nn.relu), tf.keras.layers.MaxPooling2D(( 2, 2), strides=2), tf.keras.layers.Dropout(0.25), keras.layers.Flatten() , keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dropout(0.50), keras.layers.Dense(10, activation=tf.nn.softmax) ] )
Digit Recognizer
3,148,616
sub_df['target'] = preds['predict'] sub_df.to_csv('submission_h2o.csv', index=False) sub_df.head() sub_automl = preds['predict']<define_variables>
model.compile(optimizer = 'adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
3,148,616
sub1 = 0.3*sub_xgb_cv_optimized + 0.3*sub_lgbm_cv_optimized + 0.4*sub_lgbm_optuna sub2 = 0.4*sub_xgb_cv_optimized + 0.4*sub_lgbm_cv_optimized + 0.2*sub_lgbm_optuna sub3 = 0.3*sub_xgb_cv_optimized + 0.4*sub_lgbm_cv_optimized + 0.3*sub_lgbm_optuna sub4 = 0.3*sub_xgb_cv_optimized + 0.3*sub_lgbm_cv_optimized + 0.3*sub_lgbm_optuna + 0.1*sub_automl sub5 = 0.3*sub_xgb_cv_optimized + 0.3*sub_lgbm_cv_optimized + 0.2*sub_lgbm_optuna + 0.2*sub_automl sub6 = 0.5*sub_lgbm_mod_cv_optimized + 0.5*sub_xgb_mod_cv_optimized sub7 = 0.7*sub_lgbm_mod_cv_optimized + 0.3*sub_xgb_mod_cv_optimized sub8 = 0.2*sub_xgb_cv_optimized + 0.2*sub_lgbm_cv_optimized + 0.3*sub_lgbm_mod_cv_optimized + 0.3*sub_xgb_mod_cv_optimized sub9 = 0.1*sub_xgb_cv_optimized + 0.1*sub_lgbm_cv_optimized + 0.4*sub_lgbm_mod_cv_optimized + 0.4*sub_xgb_mod_cv_optimized sub10 = 0.1*sub_xgb_cv_optimized + 0.2*sub_lgbm_cv_optimized + 0.4*sub_lgbm_mod_cv_optimized + 0.3*sub_xgb_mod_cv_optimized sub11 = 0.5*sub_xgb_cv_optimized + 0.5*sub_lgbm_cv_optimized sub12 = 0.6*sub_xgb_cv_optimized + 0.4*sub_lgbm_cv_optimized sub13 = 0.4*sub_xgb_cv_optimized + 0.6*sub_lgbm_cv_optimized sub14 = 0.2*sub_xgb_cv_optimized + 0.7*sub_lgbm_cv_optimized + 0.1*sub_lgbm_mod_cv_optimized sub14 = 0.2*sub_xgb_cv_optimized + 0.7*sub_lgbm_cv_optimized + 0.1*sub_xgb_mod_cv_optimized<save_to_csv>
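(All of these blends are plain weighted averages, so each weight vector should sum to 1 or the target scale drifts; a small illustrative sanity check, with a few weight lists transcribed from above:)

for name, ws in {'sub1': [0.3, 0.3, 0.4], 'sub4': [0.3, 0.3, 0.3, 0.1], 'sub9': [0.1, 0.1, 0.4, 0.4]}.items():
    # Tolerate float rounding; flag any blend whose weights do not sum to 1.
    assert abs(sum(ws) - 1.0) < 1e-9, f'{name} weights sum to {sum(ws)}'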
history = model.fit(X_train, Y_train, epochs=30 )
Digit Recognizer
3,148,616
sub_df['target'] = sub1 sub_df.to_csv('submission_01.csv', index=False) sub_df['target'] = sub2 sub_df.to_csv('submission_02.csv', index=False) sub_df['target'] = sub3 sub_df.to_csv('submission_03.csv', index=False) sub_df['target'] = sub4 sub_df.to_csv('submission_04.csv', index=False) sub_df['target'] = sub5 sub_df.to_csv('submission_05.csv', index=False) sub_df['target'] = sub6 sub_df.to_csv('submission_06.csv', index=False) sub_df['target'] = sub7 sub_df.to_csv('submission_07.csv', index=False) sub_df['target'] = sub8 sub_df.to_csv('submission_08.csv', index=False) sub_df['target'] = sub9 sub_df.to_csv('submission_09.csv', index=False) sub_df['target'] = sub10 sub_df.to_csv('submission_10.csv', index=False) sub_df['target'] = sub11 sub_df.to_csv('submission_11.csv', index=False) sub_df['target'] = sub12 sub_df.to_csv('submission_12.csv', index=False) sub_df['target'] = sub13 sub_df.to_csv('submission_13.csv', index=False) sub_df['target'] = sub14 sub_df.to_csv('submission_14.csv', index=False )<load_from_csv>
test_loss, test_acc = model.evaluate(X_val, Y_val) print('Test accuracy:', test_acc )
Digit Recognizer
3,148,616
PATH = '../input/tabular-playground-series-feb-2021/' train = pd.read_csv(PATH + 'train.csv') test = pd.read_csv(PATH + 'test.csv') sample = pd.read_csv(PATH + 'sample_submission.csv') print(train.shape, test.shape )<drop_column>
predictions = model.predict(test )
Digit Recognizer
3,148,616
FEATURES = train.drop(['id', 'target'], 1 ).columns FEATURES<categorify>
np.argmax(predictions[0] )
Digit Recognizer
3,148,616
for i in cat_features: le = LabelEncoder() le.fit(train[i]) train[i] = le.transform(train[i]) test[i] = le.transform(test[i]) train.head()<choose_model_class>
predictions = np.argmax(predictions, axis=1)
predictions = pd.Series(predictions, name="Label")
Digit Recognizer
3,148,616
<split><EOS>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), predictions], axis=1)
submission.to_csv("mnist_submission_v6.csv", index=False)
Digit Recognizer
3,885,820
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_model>
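The metric named above is plain categorization (classification) accuracy. As a minimal hedged sketch of how it is computed — the arrays below are invented for illustration and are not from any of the kernels:

import numpy as np

# Categorization accuracy: the fraction of predicted labels that
# exactly match the true labels (toy values, 4 of 5 correct -> 0.8).
y_true = np.array([7, 2, 1, 0, 4])
y_pred = np.array([7, 2, 1, 0, 9])
print(np.mean(y_true == y_pred))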
import tensorflow as tf
import numpy as np
import pandas as pd
import random
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
Digit Recognizer
3,885,820
def objective(trial):
    train_set = lgb.Dataset(X_train, y_train)
    val_set = lgb.Dataset(X_val, y_val)
    param = {
        "objective": "regression",
        "metric": "rmse",
        "verbosity": 1,
        "boosting_type": "gbdt",
        # LightGBM requires num_leaves >= 2, so the search starts at 2 rather than 0
        "num_leaves": trial.suggest_int("num_leaves", 2, 256),
        "max_depth": trial.suggest_int("max_depth", 3, 31),
        "lambda_l1": trial.suggest_float("lambda_l1", 0.0, 10),
        "lambda_l2": trial.suggest_float("lambda_l2", 0.4, 10),
        "feature_fraction": trial.suggest_float("feature_fraction", 0.4, 0.9),
        "bagging_fraction": trial.suggest_float("bagging_fraction", 0.4, 0.9),
        "bagging_freq": trial.suggest_int("bagging_freq", 5, 15),
        "min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
    }
    model = lgb.train(param, train_set,
                      num_boost_round=NUM_BOOST_ROUNDS,
                      early_stopping_rounds=EARLY_STOPPING_ROUNDS,
                      verbose_eval=VERBOSE_EVAL,
                      valid_sets=[train_set, val_set])
    val_preds = model.predict(X_val, num_iteration=model.best_iteration)
    scc = math.sqrt(mean_squared_error(val_preds, y_val))
    # the study maximizes, so return the negative RMSE
    return -1 * scc<find_best_params>
train_df = pd.read_csv('../input/train.csv')
Digit Recognizer
3,885,820
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=100)
trial = study.best_trial
trial.params['metric'] = 'rmse'<find_best_params>
label_df = train_df.label
train_df = train_df.drop('label', axis=1)
Digit Recognizer
3,885,820
print(trial.params)<train_model>
DS_SIZE = len(train_df)
BATCH_SIZE = 64
x_train, x_val, y_train, y_val = train_test_split(
    np.reshape(train_df.values, (DS_SIZE, 28, 28, 1)),
    label_df.values, test_size=0.01, random_state=11)
train_gen = ImageDataGenerator(
    rescale=1./255,
    shear_range=10,
    zoom_range=0.1,
    width_shift_range=0.05,
    height_shift_range=0.05,
    rotation_range=15,
    brightness_range=(0.9, 1.1),
    fill_mode='constant'
)
train_gen_flow = train_gen.flow(
    x_train, y_train,
    batch_size=BATCH_SIZE
)
Digit Recognizer
3,885,820
for train_idx, val_idx in cv.split(X, y):
    X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]
    y_train, y_val = y[train_idx], y[val_idx]
    train_set = lgb.Dataset(X_train, y_train)
    val_set = lgb.Dataset(X_val, y_val)
    model = lgb.train(trial.params, train_set,
                      num_boost_round=NUM_BOOST_ROUNDS,
                      early_stopping_rounds=EARLY_STOPPING_ROUNDS,
                      verbose_eval=-10,
                      valid_sets=[train_set, val_set])
    val_preds = model.predict(X_val, num_iteration=model.best_iteration)
    test_preds = model.predict(test[FEATURES], num_iteration=model.best_iteration)
    oof_df.loc[oof_df.iloc[val_idx].index, 'oof'] = val_preds
    sample[f'fold{fold_}'] = test_preds
    score = mean_squared_error(
        oof_df.loc[oof_df.iloc[val_idx].index]['target'],
        oof_df.loc[oof_df.iloc[val_idx].index]['oof'])
    print(math.sqrt(score))
    fold_ += 1<save_to_csv>
val_gen = ImageDataGenerator(rescale=1./255)
val_gen_flow = val_gen.flow(
    x_val, y_val,
    batch_size=BATCH_SIZE
)
Digit Recognizer
3,885,820
print(math.sqrt(mean_squared_error(oof_df.target, oof_df.oof)))
sample['target'] = sample.drop(['id', 'target'], axis=1).mean(axis=1)
sample[['id', 'target']].to_csv('submission.csv', index=False)<import_modules>
def get_model():
    model = Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=(28, 28, 1), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=0.4))
    model.add(Conv2D(96, (4, 4), activation='relu'))
    model.add(Conv2D(128, (6, 6), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Dropout(0.3))
    model.add(Dense(10, activation="softmax"))
    return model

model = get_model()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
Digit Recognizer
3,885,820
import pandas as pd
import numpy as np
import datatable as dt
import datetime
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from lightgbm import LGBMRegressor<load_from_csv>
history = model.fit_generator(
    train_gen_flow,
    steps_per_epoch=len(x_train) / BATCH_SIZE,
    epochs=50,
    validation_data=val_gen_flow,
    validation_steps=len(x_val) / BATCH_SIZE
)
Digit Recognizer
3,885,820
train = dt.fread('../input/tabular-playground-series-feb-2021/train.csv').to_pandas()
test = dt.fread('../input/tabular-playground-series-feb-2021/test.csv').to_pandas()<define_variables>
train_df = pd.read_csv('../input/train.csv')
raw_train_image_ds = np.reshape(
    train_df.drop('label', axis=1).values / 255.0,
    (len(train_df), 28, 28, 1)
)
model.evaluate(raw_train_image_ds, train_df.label.values)
Digit Recognizer
3,885,820
cat_col = [c for c in train.columns if 'cat' in c]
cont_col = [c for c in train.columns if 'cont' in c]<categorify>
test_df = pd.read_csv('../input/test.csv')
test_image_ds = np.reshape(
    test_df.values / 255.0,
    (len(test_df), 28, 28, 1)
)
preds = np.round(model.predict(test_image_ds))
Digit Recognizer
3,885,820
for c in cat_col:
    le = LabelEncoder()
    train[c] = le.fit_transform(train[c])
    test[c] = le.transform(test[c])<choose_model_class>
sample_submission_df = pd.read_csv('../input/sample_submission.csv')
sample_submission_df.Label = np.argmax(preds, axis=1)
sample_submission_df.head()
Digit Recognizer
3,885,820
kfold = KFold(n_splits=5, shuffle=True, random_state=87)<prepare_x_and_y>
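For intuition about what the KFold object above yields, here is a tiny hedged sketch on toy data (the toy array is an assumption, not part of the kernel): each call to split returns complementary train/validation index arrays.

import numpy as np
from sklearn.model_selection import KFold

toy = np.arange(10)
kf = KFold(n_splits=5, shuffle=True, random_state=87)
for tr_idx, va_idx in kf.split(toy):
    # 8 training indices and 2 validation indices per fold
    print(tr_idx, va_idx)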
sample_submission_df.to_csv('submission.csv', index=False)
Digit Recognizer
4,165,577
X = train[cat_col + cont_col]
y = train['target']
X_test = test[cat_col + cont_col]<init_hyperparams>
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
Digit Recognizer
4,165,577
lgbm_params = {
    'bagging_freq': 1,
    'reg_alpha': 2.4766410381355457,
    'reg_lambda': 2.644144282261626,
    'colsample_bytree': 0.3,
    'subsample': 0.6,
    'learning_rate': 0.008,
    'max_depth': 20,
    'num_leaves': 139,
    'min_child_samples': 176,
    'random_state': 48,
    'n_estimators': 20000,
    'metric': 'rmse',
    'cat_smooth': 9}<train_model>
from sklearn.model_selection import train_test_split
Digit Recognizer
4,165,577
results = np.zeros(X_test.shape[0])
models = []
loss = []
num = 1
for tr, te in kfold.split(X, y):
    print(f'{num} Fold Start')
    X_train, X_val = X.iloc[tr], X.iloc[te]
    y_train, y_val = y.iloc[tr], y.iloc[te]
    model = LGBMRegressor(**lgbm_params)
    model.fit(X_train, y_train,
              eval_set=(X_val, y_val),
              eval_metric='rmse',
              early_stopping_rounds=500,
              verbose=500)
    results += model.predict(X_test)
    loss.append(model.best_score_['valid_0']['rmse'])
    models.append(model)
    num += 1
results = results / len(models)
loss = np.mean(loss)<train_model>
from sklearn.model_selection import train_test_split
Digit Recognizer
4,165,577
print(loss)<load_from_csv>
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
y_train = train["label"]
x_train = train.drop(labels=["label"], axis=1)
y_train.value_counts()
Digit Recognizer
4,165,577
submission = pd.read_csv('../input/tabular-playground-series-feb-2021/sample_submission.csv')
submission['target'] = results
submission<save_to_csv>
x_train = x_train / 255.0
test = test / 255.0
Digit Recognizer
4,165,577
now = datetime.datetime.now().strftime('%Y-%m-%d:%H:%M')
submission.to_csv(f'./{now}_submission.csv', index=False)<load_from_csv>
random_seed = 2
Digit Recognizer
4,165,577
train = pd.read_csv(input_path / 'train.csv', index_col='id')
display(train.head())<load_from_csv>
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed)
Digit Recognizer
4,165,577
test = pd.read_csv(input_path / 'test.csv', index_col='id')
display(test.head())<load_from_csv>
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_val.shape[0], 'test samples')
y_train = np_utils.to_categorical(y_train)
y_val = np_utils.to_categorical(y_val)
print("Number of Classes: " + str(y_val.shape[1]))
num_classes = y_val.shape[1]
num_pixels = x_train.shape[1] * x_train.shape[2]

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding="same", input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
print(model.summary())
Digit Recognizer
4,165,577
submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id')
display(submission.head())<categorify>
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False)
datagen.fit(x_train)
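It can help to eyeball what this augmentation actually produces. A minimal hedged sketch, assuming x_train has already been reshaped to (n, 28, 28, 1) as the surrounding cells imply and that matplotlib is available; none of this appears in the original kernel:

import matplotlib.pyplot as plt

# Draw one augmented batch and plot the first nine digits (illustrative only).
aug_x, _ = next(datagen.flow(x_train, y_train, batch_size=9))
fig, axes = plt.subplots(3, 3, figsize=(4, 4))
for img, ax in zip(aug_x, axes.ravel()):
    ax.imshow(img.squeeze(), cmap='gray')
    ax.axis('off')
plt.show()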
Digit Recognizer
4,165,577
for c in train.columns:
    if train[c].dtype == 'object':
        lbl = LabelEncoder()
        lbl.fit(list(train[c].values) + list(test[c].values))
        train[c] = lbl.transform(train[c].values)
        test[c] = lbl.transform(test[c].values)
display(train.head())<drop_column>
batch_size = 128
epochs = 50
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',
                                            patience=2,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
earlystop = EarlyStopping(monitor='val_loss',
                          min_delta=0,
                          patience=5,
                          verbose=1,
                          restore_best_weights=True)
callbacks = [earlystop, learning_rate_reduction]
history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                              epochs=epochs,
                              verbose=1,
                              callbacks=callbacks,
                              steps_per_epoch=x_train.shape[0] // batch_size,
                              validation_data=(x_val, y_val))
score = model.evaluate(x_val, y_val, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
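For reference, with factor=0.5 each plateau halves the learning rate until min_lr is reached; a quick hedged arithmetic check of the resulting schedule (not from the kernel):

# Learning-rate sequence produced by repeated plateaus:
# 0.001 -> 0.0005 -> ... clamped at 1e-05.
lr, floor = 0.001, 0.00001
schedule = [lr]
while lr > floor:
    lr = max(lr * 0.5, floor)
    schedule.append(lr)
print(schedule)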
Digit Recognizer
4,165,577
target = train.pop('target')<normalization>
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
Digit Recognizer
4,165,577
scaler = StandardScaler()
train = scaler.fit_transform(train)
test = scaler.transform(test)<create_dataframe>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("2.csv", index=False)
Digit Recognizer
4,187,374
train = DataFrame(train)
test = DataFrame(test)<split>
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
Digit Recognizer
4,187,374
X_train, X_test, y_train, y_test = train_test_split(train, target, train_size=0.90)<choose_model_class>
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
Digit Recognizer
4,187,374
rf = ensemble.RandomForestRegressor()
rf.fit(X_train, y_train)
y_preds = rf.predict(X_test)
print(mean_squared_error(y_test, y_preds))<compute_train_metric>
X_train = train.drop('label', axis=1)
y_train = train.label
Digit Recognizer
4,187,374
lgbm = LGBMRegressor()
lgbm.fit(X_train, y_train)
y_pred = lgbm.predict(X_test)
mse_l = mean_squared_error(y_test, y_pred)
print(mse_l)<compute_train_metric>
y_train.value_counts()
Digit Recognizer
4,187,374
xgr = xg.XGBRegressor()
xgr.fit(X_train, y_train)
y_preds = xgr.predict(X_test)
print(mean_squared_error(y_test, y_preds))<choose_model_class>
X_train.isnull().any().sum()
Digit Recognizer
4,187,374
estimators = [('RandomForest', rf), ('LightGBM', lgbm), ('xgboost', xgr)]
# note: this rebinds the name 'ensemble', shadowing the sklearn ensemble module used earlier
ensemble = VotingRegressor(estimators)<predict_on_test>
test.isnull().any().sum()
Digit Recognizer
4,187,374
ensemble.fit(X_train, y_train)
y_preds = ensemble.predict(X_test)
print(mean_squared_error(y_test, y_preds))<save_to_csv>
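As a side note, VotingRegressor with no weights simply averages its fitted estimators' predictions. A self-contained hedged sketch on synthetic data (all names and values below are invented for illustration):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor, VotingRegressor
from sklearn.linear_model import LinearRegression

X_toy, y_toy = make_regression(n_samples=100, n_features=4, random_state=0)
vr = VotingRegressor([('rf', RandomForestRegressor(random_state=0)),
                      ('lr', LinearRegression())]).fit(X_toy, y_toy)
# The ensemble prediction equals the unweighted mean of the base predictions.
manual = np.mean([est.predict(X_toy) for est in vr.estimators_], axis=0)
assert np.allclose(vr.predict(X_toy), manual)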
X_train /= 255.
test /= 255.
Digit Recognizer
4,187,374
lgbm.fit(train, target)
submission['target'] = lgbm.predict(test)
submission.to_csv('lgbm.csv')<load_from_csv>
y_train = to_categorical(y_train, num_classes=10)
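For reference, to_categorical one-hot encodes integer labels; a tiny hedged illustration (the label value is invented):

from keras.utils import to_categorical

# The label 3 becomes a length-10 vector with a 1 at index 3.
print(to_categorical([3], num_classes=10))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]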
Digit Recognizer
4,187,374
BASE = "../input/tabular-playground-series-feb-2021"
Test = pd.read_csv(BASE + '/test.csv')
train = pd.read_csv(BASE + '/train.csv')
sample_sub = pd.read_csv(BASE + '/sample_submission.csv')<import_modules>
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=3)
Digit Recognizer
4,187,374
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns<set_options>
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
Digit Recognizer
4,187,374
sns.set_theme()<count_missing_values>
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
Digit Recognizer
4,187,374
print('Missing values in train dataset:', sum(train.isnull().sum()))
print('Missing values in test dataset:', sum(Test.isnull().sum()))<define_variables>
from keras.optimizers import Adamax  # import added; Adamax is not imported in the cells above

optimizer = Adamax(lr=0.001)
Digit Recognizer
4,187,374
cat_features = [feature for feature in train.columns if 'cat' in feature]
cont_features = [feature for feature in train.columns if 'cont' in feature]<set_options>
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
4,187,374
warnings.filterwarnings('ignore')<count_values>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
4,187,374
Count_diagram(train)<count_values>
datagen = ImageDataGenerator(
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1)
datagen.fit(X_train)
Digit Recognizer
4,187,374
Count_diagram(Test)<import_modules>
%%time
model.fit_generator(datagen.flow(X_train, y_train, batch_size=86),
                    epochs=100,
                    validation_data=(X_val, y_val),
                    verbose=2,
                    steps_per_epoch=X_train.shape[0] // 86,
                    callbacks=[learning_rate_reduction])
Digit Recognizer
4,187,374
shap.initjs()<prepare_x_and_y>
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
Digit Recognizer
4,187,374
<categorify><EOS>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)
Digit Recognizer
4,361,589
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<install_modules>
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep')
Digit Recognizer