kernel_id (int64, 24.2k–23.3M) | prompt (stringlengths 8–1.85M) | completion (stringlengths 1–182k) | comp_name (stringlengths 5–57)
---|---|---|---|
4,566,279 |
high_corr_col = filter_correlation(train, 0.7)
high_corr_col<drop_column>
|
results = pd.Series(Y_pred, name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_predictions.csv",index=False )
|
Digit Recognizer
|
3,437,991 |
train = train.drop(['1stFlrSF', 'GarageArea', 'TotRmsAbvGrd'], axis = 1)
test = test.drop(['1stFlrSF', 'GarageArea', 'TotRmsAbvGrd'], axis = 1 )<drop_column>
|
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
|
Digit Recognizer
|
3,437,991 |
train.drop(['MiscVal', 'MoSold','YrSold'], axis = 1, inplace = True)
test.drop(['MiscVal', 'MoSold','YrSold'], axis = 1, inplace = True )<prepare_x_and_y>
|
y_train = df_train['label']
x_train = df_train.drop(labels = ['label'] , axis=1)
del df_train
|
Digit Recognizer
|
3,437,991 |
X = train.drop(['SalePrice'], axis = 1)
col_to_use = list(X.columns)
y = train['SalePrice']
print(X.shape)
print(y.shape )<define_variables>
|
y_train.value_counts()
|
Digit Recognizer
|
3,437,991 |
num_cols = [col for col in col_to_use if train[col].dtype in ['int64', 'float64']]
cat_cols = [col for col in col_to_use if train[col].dtype == 'object']
num_cols<choose_model_class>
|
x_train = x_train/255.0
df_test = df_test/255.0
|
Digit Recognizer
|
3,437,991 |
num_processor = Pipeline(steps = [
('imputer', SimpleImputer(strategy='most_frequent')) ,
('scaler', MinMaxScaler())
] )<categorify>
|
y_train = to_categorical(y_train , num_classes = 10 )
|
Digit Recognizer
|
3,437,991 |
cat_processor = Pipeline(steps = [
('imputer', SimpleImputer(strategy = 'most_frequent')) ,
('ohe', OneHotEncoder(handle_unknown = 'ignore', sparse = False))
] )<feature_engineering>
|
x_train , x_val , y_train , y_val = train_test_split(x_train , y_train , test_size =.1 , random_state = 0 )
|
Digit Recognizer
|
3,437,991 |
preprocessor = ColumnTransformer([
('num', num_processor, num_cols),
('cat', cat_processor, cat_cols)
] )<split>
|
classifier = Sequential()
classifier.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
classifier.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
classifier.add(MaxPool2D(pool_size=(2,2)))
classifier.add(Dropout(0.25))
classifier.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
classifier.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
classifier.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
classifier.add(Dropout(0.25))
classifier.add(Flatten())
classifier.add(Dense(256, activation = "relu"))
classifier.add(Dropout(0.5))
classifier.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
3,437,991 |
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42 )<choose_model_class>
|
classifier.compile(optimizer = 'adam' , loss = "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
3,437,991 |
model = LinearRegression()
model1 = Lasso()
model2 = Ridge()
model3 = DecisionTreeRegressor(max_leaf_nodes = 30, random_state = 42)
model4 = RandomForestRegressor(n_estimators = 500, random_state = 42)
model5 = XGBRegressor(n_estimators = 1000, learning_rate = 0.05, random_state = 42)
model6 = GradientBoostingRegressor(learning_rate = 0.1, n_estimators = 165, random_state=42)
model7 = CatBoostRegressor(random_state=0,verbose=False, depth = 5, early_stopping_rounds=300, learning_rate= 0.1 )<compute_train_metric>
|
epochs = 30
batch_size = 126
|
Digit Recognizer
|
3,437,991 |
def build_model(model):
    clf = Pipeline(steps=[
        ('preprocessor', preprocessor),
        ('model', model)
    ])
    clf.fit(X_train, y_train)
    print(model)
    print("Train set score:", clf.score(X_train, y_train))
    print("Test set score:", clf.score(X_test, y_test))
    print("\n")
    print("Train set rmse:", mean_squared_error(y_train, clf.predict(X_train), squared=False))
    print("Test set rmse:", mean_squared_error(y_test, clf.predict(X_test), squared=False))
    print("\n")
    predictions = clf.predict(test)
    return predictions<load_pretrained>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(x_train )
|
Digit Recognizer
|
3,437,991 |
predictions = build_model(model7 )<prepare_output>
|
history = classifier.fit_generator(datagen.flow(x_train,y_train, batch_size=batch_size),
epochs = epochs, validation_data =(x_val,y_val),
verbose = 2, steps_per_epoch=x_train.shape[0] // batch_size
)
|
Digit Recognizer
|
3,437,991 |
predictions = np.exp(predictions )<save_to_csv>
|
results = classifier.predict(df_test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
3,437,991 |
output = pd.DataFrame({'Id': Id, 'SalePrice': predictions})
output.to_csv('submission.csv', index = False)
sub = pd.read_csv('./submission.csv')
sub<import_modules>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False )
|
Digit Recognizer
|
1,172,238 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
import sklearn.metrics as metrics
import math<load_from_csv>
|
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.head(5)
|
Digit Recognizer
|
1,172,238 |
sample_submission = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/sample_submission.csv")
test = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/test.csv")
train = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/train.csv")
c_test = test.copy()
c_train = train.copy()<concatenate>
|
train = train_df.values
test = test_df.values
trainX = train[:, 1:].reshape(train.shape[0], 28, 28, 1)
trainX = trainX.astype(float)
trainX /= 255.0
|
Digit Recognizer
|
1,172,238 |
c_train['train'] = 1
c_test['train'] = 0
df = pd.concat([c_train, c_test], axis=0,sort=False )<create_dataframe>
|
trainY = kutils.to_categorical(train[:, 0])
class_num = trainY.shape[1]
print(class_num )
|
Digit Recognizer
|
1,172,238 |
NAN = [(c, df[c].isna().mean() * 100) for c in df]
NAN = pd.DataFrame(NAN, columns=["column_name", "percentage"])<sort_values>
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
|
Digit Recognizer
|
1,172,238 |
NAN = NAN[NAN.percentage > 50]
NAN.sort_values("percentage", ascending=False )<drop_column>
|
model = Sequential()
model.add(Conv2D(16,(3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(32,(5, 5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1)))
model.add(Conv2D(32,(3, 3), padding='same', activation='relu'))
model.add(Conv2D(64,(5, 5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1)))
model.add(Conv2D(64,(3, 3), padding='same', activation='relu'))
model.add(Conv2D(128,(5, 5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(128,(3, 3), padding='same', activation='relu'))
model.add(Conv2D(256,(5, 5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='sigmoid'))
model.add(Dense(class_num, activation='softmax'))
model.compile(loss="categorical_crossentropy", optimizer="sgd", metrics=["accuracy"] )
|
Digit Recognizer
|
1,172,238 |
df = df.drop(['Alley','PoolQC','Fence','MiscFeature'],axis=1 )<count_missing_values>
|
model.fit(trainX, trainY, batch_size=64, epochs=100, verbose=2 )
|
Digit Recognizer
|
1,172,238 |
null_counts = object_columns_df.isnull().sum()
print("Number of null values in each column:
{}".format(null_counts))<data_type_conversions>
|
testX = test.reshape(test.shape[0], 28, 28, 1)
testX = testX.astype(float)
testX /= 255.0
yPred = model.predict_classes(testX)
np.savetxt('mnist-cnn.csv', np.c_[range(1,len(yPred)+1),yPred], delimiter=',', header = 'ImageId,Label', comments = '', fmt='%d' )
|
Digit Recognizer
|
1,191,702 |
columns_None = ['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2','GarageType','GarageFinish','GarageQual','FireplaceQu','GarageCond']
object_columns_df[columns_None]= object_columns_df[columns_None].fillna('None' )<categorify>
|
df = pd.read_csv('../input/train.csv')
df.head()
|
Digit Recognizer
|
1,191,702 |
columns_with_lowNA = ['MSZoning','Utilities','Exterior1st','Exterior2nd','MasVnrType','Electrical','KitchenQual','Functional','SaleType']
object_columns_df[columns_with_lowNA] = object_columns_df[columns_with_lowNA].fillna(object_columns_df.mode().iloc[0] )<count_missing_values>
|
y = to_categorical(y ).astype("uint8")
print(y.shape )
|
Digit Recognizer
|
1,191,702 |
null_counts = numerical_columns_df.isnull().sum()
print("Number of null values in each column:
{}".format(null_counts))<feature_engineering>
|
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
del df, X, y
|
Digit Recognizer
|
1,191,702 |
numerical_columns_df['GarageYrBlt'] = numerical_columns_df['GarageYrBlt'].fillna(numerical_columns_df['YrSold']-35)
numerical_columns_df['LotFrontage'] = numerical_columns_df['LotFrontage'].fillna(68 )<drop_column>
|
def create_model():
    model = Sequential()
    model.add(Conv2D(32, 5, activation="relu", input_shape=(28, 28, 1)))
    model.add(Conv2D(32, 5, activation="relu"))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))
    model.add(Conv2D(64, 3, activation="relu", padding='same'))
    model.add(Conv2D(64, 3, activation="relu"))
    model.add(Dropout(0.4))
    model.add(Conv2D(128, 3, activation="relu", padding='same'))
    model.add(Conv2D(128, 3, activation="relu"))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation="softmax"))
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    return model
|
Digit Recognizer
|
1,191,702 |
object_columns_df = object_columns_df.drop(['Heating','RoofMatl','Condition2','Street','Utilities'],axis=1 )<filter>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=2,
factor=0.4,
min_lr=3e-6)
early_stops = EarlyStopping(monitor='val_acc', min_delta=0, patience=6, verbose=2, mode='auto' )
|
Digit Recognizer
|
1,191,702 |
Negatif = numerical_columns_df[numerical_columns_df['Age_House'] < 0]
Negatif<feature_engineering>
|
data_aug = ImageDataGenerator(rotation_range=20, width_shift_range=4, height_shift_range=4, zoom_range=0.1 )
|
Digit Recognizer
|
1,191,702 |
numerical_columns_df['TotalBsmtBath'] = numerical_columns_df['BsmtFullBath'] + numerical_columns_df['BsmtFullBath']*0.5
numerical_columns_df['TotalBath'] = numerical_columns_df['FullBath'] + numerical_columns_df['HalfBath']*0.5
numerical_columns_df['TotalSA']=numerical_columns_df['TotalBsmtSF'] + numerical_columns_df['1stFlrSF'] + numerical_columns_df['2ndFlrSF']<categorify>
|
history = model.fit_generator(data_aug.flow(X_train, y_train, batch_size=128), steps_per_epoch=len(X_train)//128,
validation_data=(X_test, y_test), epochs=100, verbose=1, callbacks=[learning_rate_reduction] )
|
Digit Recognizer
|
1,191,702 |
bin_map = {'TA':2,'Gd':3, 'Fa':1,'Ex':4,'Po':1,'None':0,'Y':1,'N':0,'Reg':3,'IR1':2,'IR2':1,'IR3':0,"None" : 0,
"No" : 2, "Mn" : 2, "Av": 3,"Gd" : 4,"Unf" : 1, "LwQ": 2, "Rec" : 3,"BLQ" : 4, "ALQ" : 5, "GLQ" : 6
}
object_columns_df['ExterQual'] = object_columns_df['ExterQual'].map(bin_map)
object_columns_df['ExterCond'] = object_columns_df['ExterCond'].map(bin_map)
object_columns_df['BsmtCond'] = object_columns_df['BsmtCond'].map(bin_map)
object_columns_df['BsmtQual'] = object_columns_df['BsmtQual'].map(bin_map)
object_columns_df['HeatingQC'] = object_columns_df['HeatingQC'].map(bin_map)
object_columns_df['KitchenQual'] = object_columns_df['KitchenQual'].map(bin_map)
object_columns_df['FireplaceQu'] = object_columns_df['FireplaceQu'].map(bin_map)
object_columns_df['GarageQual'] = object_columns_df['GarageQual'].map(bin_map)
object_columns_df['GarageCond'] = object_columns_df['GarageCond'].map(bin_map)
object_columns_df['CentralAir'] = object_columns_df['CentralAir'].map(bin_map)
object_columns_df['LotShape'] = object_columns_df['LotShape'].map(bin_map)
object_columns_df['BsmtExposure'] = object_columns_df['BsmtExposure'].map(bin_map)
object_columns_df['BsmtFinType1'] = object_columns_df['BsmtFinType1'].map(bin_map)
object_columns_df['BsmtFinType2'] = object_columns_df['BsmtFinType2'].map(bin_map)
PavedDrive = {"N" : 0, "P" : 1, "Y" : 2}
object_columns_df['PavedDrive'] = object_columns_df['PavedDrive'].map(PavedDrive )<categorify>
|
def make_submission(model, filename="submission.csv"):
    df = pd.read_csv("../input/test.csv")
    X = df.values / 255
    X = X.reshape(X.shape[0], 28, 28, 1)
    preds = model.predict_classes(X)
    subm = pd.DataFrame(data=list(zip(range(1, len(preds) + 1), preds)), columns=["ImageId", "Label"])
    subm.to_csv(filename, index=False)
|
Digit Recognizer
|
1,191,702 |
rest_object_columns = object_columns_df.select_dtypes(include=['object'])
object_columns_df = pd.get_dummies(object_columns_df, columns=rest_object_columns.columns )<concatenate>
|
make_submission(model, "submission.csv" )
|
Digit Recognizer
|
1,191,702 |
<drop_column><EOS>
|
print(f"Finished in {int(time.time() - start_time)} seconds..." )
|
Digit Recognizer
|
52,414 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<prepare_x_and_y>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
Digit Recognizer
|
52,414 |
target= df_train['SalePrice']
df_train = df_train.drop(['SalePrice'],axis=1 )<split>
|
print(tf.__version__ )
|
Digit Recognizer
|
52,414 |
x_train,x_test,y_train,y_test = train_test_split(df_train,target,test_size=0.33,random_state=0 )<choose_model_class>
|
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
|
Digit Recognizer
|
52,414 |
xgb = XGBRegressor(booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.6, gamma=0,
importance_type='gain', learning_rate=0.01, max_delta_step=0,
max_depth=4, min_child_weight=1.5, n_estimators=2400,
n_jobs=1, nthread=None, objective='reg:linear',
reg_alpha=0.6, reg_lambda=0.6, scale_pos_weight=1,
silent=None, subsample=0.8, verbosity=1)
lgbm = LGBMRegressor(objective='regression',
num_leaves=4,
learning_rate=0.01,
n_estimators=12000,
max_bin=200,
bagging_fraction=0.75,
bagging_freq=5,
bagging_seed=7,
feature_fraction=0.4,
)<train_model>
|
train_data = train_data.values
test_data = test_data.values
|
Digit Recognizer
|
52,414 |
xgb.fit(x_train, y_train)
lgbm.fit(x_train, y_train,eval_metric='rmse' )<predict_on_test>
|
np.random.shuffle(train_data )
|
Digit Recognizer
|
52,414 |
predict1 = xgb.predict(x_test)
predict = lgbm.predict(x_test )<compute_test_metric>
|
train_digits = train_digits / 255.0
val_digits = val_digits / 255.0
test_digits = test_digits / 255.0
|
Digit Recognizer
|
52,414 |
print('Root Mean Square Error test = ' + str(math.sqrt(metrics.mean_squared_error(y_test, predict1))))
print('Root Mean Square Error test = ' + str(math.sqrt(metrics.mean_squared_error(y_test, predict))))<train_model>
|
X0 = Input(shape =(28,28,1))
X = Conv2D(filters = 32, kernel_size = 3, padding = 'Same', activation ='relu' )(X0)
X = Conv2D(filters = 32, kernel_size = 3, padding = 'Same', activation ='relu' )(X)
X = MaxPooling2D(pool_size = 2, strides = 2 )(X)
X = Dropout(0.25 )(X)
X = Conv2D(filters = 64, kernel_size = 3, padding = 'Same', activation ='relu' )(X)
X = Conv2D(filters = 64, kernel_size = 3, padding = 'Same', activation ='relu' )(X)
X = MaxPooling2D(pool_size = 2, strides = 2 )(X)
X = Dropout(0.25 )(X)
X = Flatten()(X)
X = Dense(512, activation = "relu" )(X)
X = Dropout(0.50 )(X)
Out = Dense(10, activation = "softmax" )(X )
|
Digit Recognizer
|
52,414 |
xgb.fit(df_train, target)
lgbm.fit(df_train, target,eval_metric='rmse' )<predict_on_test>
|
model = Model(inputs=X0, outputs=Out)
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
initial_model_weights = model.get_weights()
|
Digit Recognizer
|
52,414 |
predict4 = lgbm.predict(df_test)
predict3 = xgb.predict(df_test)
predict_y =(predict3*0.45 + predict4 * 0.55 )<save_to_csv>
|
history = model.fit(train_digits, train_labels,
epochs=10,
verbose=2,
validation_data=(val_digits,val_labels))
|
Digit Recognizer
|
52,414 |
submission = pd.DataFrame({
"Id": test["Id"],
"SalePrice": predict_y
})
submission.to_csv('submission.csv', index=False )<import_modules>
|
model.save("cnn1.h5" )
|
Digit Recognizer
|
52,414 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
import sklearn.metrics as metrics
import math<load_from_csv>
|
predictions = model.predict(test_digits)
predictions = np.argmax(predictions,axis = 1)
predictions = pd.Series(predictions,name = 'Label')
ids = pd.Series(range(1,28001),name = 'ImageId')
predictions = pd.concat([predictions,ids],axis = 1)
predictions.to_csv('pred1.csv',index = False)
del(predictions)
del(ids )
|
Digit Recognizer
|
52,414 |
sample_submission = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/sample_submission.csv")
test = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/test.csv")
train = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/train.csv")
c_test = test.copy()
c_train = train.copy()<concatenate>
|
model.set_weights(initial_model_weights )
|
Digit Recognizer
|
52,414 |
c_train['train'] = 1
c_test['train'] = 0
df = pd.concat([c_train, c_test], axis=0,sort=False )<create_dataframe>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0)
datagen.fit(train_digits )
|
Digit Recognizer
|
52,414 |
NAN = [(c, df[c].isna().mean() * 100) for c in df]
NAN = pd.DataFrame(NAN, columns=["column_name", "percentage"])<sort_values>
|
batch_size = 64
gen_history = model.fit_generator(datagen.flow(train_digits, train_labels, batch_size=batch_size),
epochs=10,
verbose=2,
steps_per_epoch=train_digits.shape[0]/batch_size,
validation_data=(val_digits,val_labels))
plot_history(gen_history.history )
|
Digit Recognizer
|
52,414 |
<drop_column><EOS>
|
predictions = model.predict(test_digits)
predictions = np.argmax(predictions,axis = 1)
predictions = pd.Series(predictions,name = 'Label')
ids = pd.Series(range(1,28001),name = 'ImageId')
predictions = pd.concat([predictions,ids],axis = 1)
predictions.to_csv('pred2.csv',index = False)
del(predictions)
del(ids)
model.save("cnn2.h5" )
|
Digit Recognizer
|
4,430,782 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_missing_values>
|
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
|
Digit Recognizer
|
4,430,782 |
null_counts = object_columns_df.isnull().sum()
print("Number of null values in each column:
{}".format(null_counts))<data_type_conversions>
|
training_images = pd.read_csv('../input/train.csv')
test_images = pd.read_csv('../input/test.csv')
training_labels = training_images['label']
training_images = training_images.drop(labels = ['label'], axis = 1)
training_images = training_images.values.reshape(42000, 28, 28, 1)
test_images = test_images.values.reshape(28000, 28, 28, 1)
training_images = training_images / 255.0
test_images = test_images / 255.0
|
Digit Recognizer
|
4,430,782 |
columns_None = ['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2','GarageType','GarageFinish','GarageQual','FireplaceQu','GarageCond']
object_columns_df[columns_None]= object_columns_df[columns_None].fillna('None' )<categorify>
|
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1)) ,
tf.keras.layers.Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1)) ,
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(64,(3, 3), activation='relu', input_shape=(28, 28, 1)) ,
tf.keras.layers.Conv2D(64,(3, 3), activation='relu', input_shape=(28, 28, 1)) ,
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.fit(training_images, training_labels, epochs=15 )
|
Digit Recognizer
|
4,430,782 |
columns_with_lowNA = ['MSZoning','Utilities','Exterior1st','Exterior2nd','MasVnrType','Electrical','KitchenQual','Functional','SaleType']
object_columns_df[columns_with_lowNA] = object_columns_df[columns_with_lowNA].fillna(object_columns_df.mode().iloc[0] )<count_missing_values>
|
predict = model.predict(test_images)
predict_array = np.argmax(predict, axis=1)
predict_array = predict_array.tolist()
|
Digit Recognizer
|
4,430,782 |
null_counts = numerical_columns_df.isnull().sum()
print("Number of null values in each column:
{}".format(null_counts))<feature_engineering>
|
image_set = pd.read_csv('../input/test.csv')
for i in np.random.randint(28001, size=10):
    print("Predicted : " + str(predict_array[i]))
    image = image_set.iloc[i].values.reshape(28, 28)
    plt.imshow(image, cmap='gray')
    plt.show()
|
Digit Recognizer
|
4,430,782 |
numerical_columns_df['GarageYrBlt'] = numerical_columns_df['GarageYrBlt'].fillna(numerical_columns_df['YrSold']-35)
numerical_columns_df['LotFrontage'] = numerical_columns_df['LotFrontage'].fillna(68 )<drop_column>
|
data_to_submit = pd.DataFrame({
'ImageId': range(1, 28001),
'Label': predict_array
})
data_to_submit.to_csv('csv_to_submit3.csv', index = False )
|
Digit Recognizer
|
4,845,532 |
object_columns_df = object_columns_df.drop(['Heating','RoofMatl','Condition2','Street','Utilities'],axis=1)
<filter>
|
class Dataset(Dataset):
    def __init__(self, path, transform=None):
        self.data = pd.read_csv(path)
        self.transform = transform
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        item = self.data.iloc[index]
        image = item[1:].values.astype(np.uint8).reshape((28, 28))
        label = item[0]
        if self.transform is not None:
            image = self.transform(image)
        return image, label
|
Digit Recognizer
|
4,845,532 |
Negatif = numerical_columns_df[numerical_columns_df['Age_House'] < 0]
Negatif<feature_engineering>
|
path = '../input/train.csv'
VALID_SIZE = 0.2
train_transform = transforms.Compose([
transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=(0.5,), std=(0.5,))
])
valid_transform = transforms.Compose([
transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=(0.5,), std=(0.5,))
])
train_data = Dataset(path, train_transform)
valid_data = Dataset(path, valid_transform)
trainloader = DataLoader(train_data, batch_size = 1500, shuffle = True)
testloader = DataLoader(valid_data, batch_size = 1500, shuffle = False)
len(trainloader )
|
Digit Recognizer
|
4,845,532 |
numerical_columns_df['TotalBsmtBath'] = numerical_columns_df['BsmtFullBath'] + numerical_columns_df['BsmtFullBath']*0.5
numerical_columns_df['TotalBath'] = numerical_columns_df['FullBath'] + numerical_columns_df['HalfBath']*0.5
numerical_columns_df['TotalSA']=numerical_columns_df['TotalBsmtSF'] + numerical_columns_df['1stFlrSF'] + numerical_columns_df['2ndFlrSF']<categorify>
|
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1),
            mila(),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 32, 3, stride=2, padding=1),
            mila(),
            nn.BatchNorm2d(32),
            nn.MaxPool2d(2, 2),
            nn.Dropout(0.25)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 3, padding=1),
            mila(),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3, stride=2, padding=1),
            mila(),
            nn.BatchNorm2d(64),
            nn.Dropout(0.25)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(64, 128, 3, padding=1),
            mila(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, stride=2, padding=1),
            mila(),
            nn.BatchNorm2d(128),
            nn.Dropout(0.25)
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            mila(),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(2, 2),
            nn.Dropout(0.25)
        )
        self.fc = nn.Sequential(
            nn.Linear(256, 256),
            nn.Dropout(0.3),
            mila(),
            nn.Linear(256, 256),
            nn.Dropout(0.4),
            mila(),
            nn.Linear(256, 10),
            nn.Softmax(dim=1)
        )
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)
|
Digit Recognizer
|
4,845,532 |
bin_map = {'TA':2,'Gd':3, 'Fa':1,'Ex':4,'Po':1,'None':0,'Y':1,'N':0,'Reg':3,'IR1':2,'IR2':1,'IR3':0,"None" : 0,
"No" : 2, "Mn" : 2, "Av": 3,"Gd" : 4,"Unf" : 1, "LwQ": 2, "Rec" : 3,"BLQ" : 4, "ALQ" : 5, "GLQ" : 6
}
object_columns_df['ExterQual'] = object_columns_df['ExterQual'].map(bin_map)
object_columns_df['ExterCond'] = object_columns_df['ExterCond'].map(bin_map)
object_columns_df['BsmtCond'] = object_columns_df['BsmtCond'].map(bin_map)
object_columns_df['BsmtQual'] = object_columns_df['BsmtQual'].map(bin_map)
object_columns_df['HeatingQC'] = object_columns_df['HeatingQC'].map(bin_map)
object_columns_df['KitchenQual'] = object_columns_df['KitchenQual'].map(bin_map)
object_columns_df['FireplaceQu'] = object_columns_df['FireplaceQu'].map(bin_map)
object_columns_df['GarageQual'] = object_columns_df['GarageQual'].map(bin_map)
object_columns_df['GarageCond'] = object_columns_df['GarageCond'].map(bin_map)
object_columns_df['CentralAir'] = object_columns_df['CentralAir'].map(bin_map)
object_columns_df['LotShape'] = object_columns_df['LotShape'].map(bin_map)
object_columns_df['BsmtExposure'] = object_columns_df['BsmtExposure'].map(bin_map)
object_columns_df['BsmtFinType1'] = object_columns_df['BsmtFinType1'].map(bin_map)
object_columns_df['BsmtFinType2'] = object_columns_df['BsmtFinType2'].map(bin_map)
PavedDrive = {"N" : 0, "P" : 1, "Y" : 2}
object_columns_df['PavedDrive'] = object_columns_df['PavedDrive'].map(PavedDrive)
<categorify>
|
!wget https://raw.githubusercontent.com/LiyuanLucasLiu/RAdam/master/cifar_imagenet/utils/radam.py
|
Digit Recognizer
|
4,845,532 |
rest_object_columns = object_columns_df.select_dtypes(include=['object'])
object_columns_df = pd.get_dummies(object_columns_df, columns=rest_object_columns.columns )<concatenate>
|
model = Net()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = radam.RAdam(model.parameters() , lr=0.00159 )
|
Digit Recognizer
|
4,845,532 |
df_final = pd.concat([object_columns_df, numerical_columns_df], axis=1,sort=False)
df_final.head()<drop_column>
|
total_epoch = 50
|
Digit Recognizer
|
4,845,532 |
df_final = df_final.drop(['Id',],axis=1)
df_train = df_final[df_final['train'] == 1]
df_train = df_train.drop(['train',],axis=1)
df_test = df_final[df_final['train'] == 0]
df_test = df_test.drop(['SalePrice'],axis=1)
df_test = df_test.drop(['train',],axis=1 )<prepare_x_and_y>
|
n_epochs = total_epoch
train_loss_data,valid_loss_data = [],[]
valid_loss_min = np.Inf
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
for epoch in range(n_epochs):
    train_loss = 0.0
    valid_loss = 0.0
    model.train()
    for data, target in trainloader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    model.eval()
    for data, target in testloader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        valid_loss += loss.item()
        _, pred = torch.max(output, 1)
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        for i in range(16):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1
    train_loss = train_loss / len(trainloader.dataset)
    valid_loss = valid_loss / len(testloader.dataset)
    train_loss_data.append(train_loss)
    valid_loss_data.append(valid_loss)
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch + 1,
        train_loss,
        valid_loss
    ))
    print('\t\tTest Accuracy: %4d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))
    if valid_loss <= valid_loss_min:
        print('\t\tValidation loss decreased ({:.6f} --> {:.6f}). Saving model...'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'model.pt')
        valid_loss_min = valid_loss
|
Digit Recognizer
|
4,845,532 |
target= df_train['SalePrice']
df_train = df_train.drop(['SalePrice'],axis=1 )<split>
|
model.load_state_dict(torch.load('model.pt'))
|
Digit Recognizer
|
4,845,532 |
x_train,x_test,y_train,y_test = train_test_split(df_train,target,test_size=0.33,random_state=0 )<choose_model_class>
|
classes = ['0', '1', '2', '3', '4',
           '5', '6', '7', '8', '9']
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    model.eval()
    for data, target in testloader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        test_loss += loss.item() * data.size(0)
        _, pred = torch.max(output, 1)
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        for i in range(16):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1
test_loss = test_loss / len(testloader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
|
Digit Recognizer
|
4,845,532 |
xgb = XGBRegressor(booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.6, gamma=0,
importance_type='gain', learning_rate=0.01, max_delta_step=0,
max_depth=4, min_child_weight=1.5, n_estimators=2400,
n_jobs=1, nthread=None, objective='reg:linear',
reg_alpha=0.6, reg_lambda=0.6, scale_pos_weight=1,
silent=None, subsample=0.8, verbosity=1)
lgbm = LGBMRegressor(objective='regression',
num_leaves=4,
learning_rate=0.01,
n_estimators=12000,
max_bin=200,
bagging_fraction=0.75,
bagging_freq=5,
bagging_seed=7,
feature_fraction=0.4,
)<train_model>
|
class DatasetSubmissionMNIST(torch.utils.data.Dataset):
    def __init__(self, file_path, transform=None):
        self.data = pd.read_csv(file_path)
        self.transform = transform
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        image = self.data.iloc[index].values.astype(np.uint8).reshape((28, 28, 1))
        if self.transform is not None:
            image = self.transform(image)
        return image
|
Digit Recognizer
|
4,845,532 |
xgb.fit(x_train, y_train)
lgbm.fit(x_train, y_train,eval_metric='rmse')
<predict_on_test>
|
transform = transforms.Compose([
transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=(0.5,), std=(0.5,))
])
submissionset = DatasetSubmissionMNIST('../input/test.csv', transform=transform)
submissionloader = torch.utils.data.DataLoader(submissionset, batch_size=128, shuffle=False )
|
Digit Recognizer
|
4,845,532 |
predict1 = xgb.predict(x_test)
predict = lgbm.predict(x_test )<compute_test_metric>
|
submission = [['ImageId', 'Label']]
with torch.no_grad():
    model.eval()
    image_id = 1
    for images in submissionloader:
        images = images.to(device)
        log_ps = model(images)
        ps = torch.exp(log_ps)
        top_p, top_class = ps.topk(1, dim=1)
        for prediction in top_class:
            submission.append([image_id, prediction.item()])
            image_id += 1
print(len(submission) - 1)
|
Digit Recognizer
|
4,845,532 |
<train_model><EOS>
|
with open('submission.csv', 'w') as submissionFile:
    writer = csv.writer(submissionFile)
    writer.writerows(submission)
print('Submission Complete!')
|
Digit Recognizer
|
6,925,490 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<predict_on_test>
|
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
|
Digit Recognizer
|
6,925,490 |
predict4 = lgbm.predict(df_test)
predict3 = xgb.predict(df_test)
predict_y =(predict3*0.45 + predict4 * 0.55 )<save_to_csv>
|
mnist_train_complete = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
mnist_test_complete = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
mnist_train_complete.head(5 )
|
Digit Recognizer
|
6,925,490 |
submission = pd.DataFrame({
"Id": test["Id"],
"SalePrice": predict_y
})
submission.to_csv('submission.csv', index=False )<import_modules>
|
train_y = mnist_train_complete.iloc[:, 0].values.astype('int32')
train_x = mnist_train_complete.iloc[:, 1:].values.astype('float32')
test_x = mnist_test_complete.values.astype('float32')
train_x = train_x.reshape(train_x.shape[0], 28, 28)
test_x = test_x.reshape(test_x.shape[0], 28, 28 )
|
Digit Recognizer
|
6,925,490 |
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow_hub as hub<load_from_url>
|
train_x = train_x.astype('float32')/np.max(train_x)
test_x = test_x.astype('float32')/np.max(test_x)
mean = np.std(train_x)
train_x -= mean
mean = np.std(test_x)
test_x -= mean
|
Digit Recognizer
|
6,925,490 |
!wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py<import_modules>
|
splitted_train_X, splitted_test_X, splitted_train_y, splitted_test_y = train_test_split(train_x, train_y, test_size=0.2, random_state=81)
ohe_splitted_train_y = tf_utils.to_categorical(splitted_train_y, 10)
ohe_splitted_test_y = tf_utils.to_categorical(splitted_test_y, 10)
print('One-hot labels:')
print(splitted_train_y[:10] )
|
Digit Recognizer
|
6,925,490 |
import tokenization<categorify>
|
model_sol_1 = tf.keras.models.Sequential()
model_sol_1.add(tf.keras.layers.Flatten(input_shape = splitted_train_X.shape[1:]))
model_sol_1.add(tf.keras.layers.Dense(512, activation='relu'))
model_sol_1.add(tf.keras.layers.Dropout(0.2))
model_sol_1.add(tf.keras.layers.Dense(512, activation='relu'))
model_sol_1.add(tf.keras.layers.Dropout(0.2))
model_sol_1.add(tf.keras.layers.Dense(10, activation='softmax'))
model_sol_1.summary()
|
Digit Recognizer
|
6,925,490 |
def bert_encode(texts, tokenizer, max_len=512):
    all_tokens = []
    all_masks = []
    all_segments = []
    for text in texts:
        text = tokenizer.tokenize(text)
        text = text[:max_len - 2]
        input_sequence = ["[CLS]"] + text + ["[SEP]"]
        pad_len = max_len - len(input_sequence)
        tokens = tokenizer.convert_tokens_to_ids(input_sequence)
        tokens += [0] * pad_len
        pad_masks = [1] * len(input_sequence) + [0] * pad_len
        segment_ids = [0] * max_len
        all_tokens.append(tokens)
        all_masks.append(pad_masks)
        all_segments.append(segment_ids)
    return np.array(all_tokens), np.array(all_masks), np.array(all_segments)<choose_model_class>
|
model_sol_1.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'] )
|
Digit Recognizer
|
6,925,490 |
def build_model(bert_layer, max_len=512):
    input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
    input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
    segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
    _, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
    clf_output = sequence_output[:, 0, :]
    out = Dense(1, activation='sigmoid')(clf_output)
    model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
    model.compile(Adam(lr=2e-6), loss='binary_crossentropy', metrics=['accuracy'])
    return model<load_from_csv>
|
score = model_sol_1.evaluate(splitted_test_X, ohe_splitted_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %4f%%' % accuracy )
|
Digit Recognizer
|
6,925,490 |
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv" )<choose_model_class>
|
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)
hist_sol_1 = model_sol_1.fit(splitted_train_X, ohe_splitted_train_y, batch_size=128, epochs=10,
validation_split=0.2, callbacks=[checkpointer],
verbose=2, shuffle=True )
|
Digit Recognizer
|
6,925,490 |
%%time
module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True )<feature_engineering>
|
model_sol_1.load_weights('mnist.model.best.hdf5')
score = model_sol_1.evaluate(splitted_test_X, ohe_splitted_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %.4f%%' % accuracy )
|
Digit Recognizer
|
6,925,490 |
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case )<categorify>
|
predictions = model_sol_1.predict(test_x)
predictions = [np.argmax(x) for x in predictions]
|
Digit Recognizer
|
6,925,490 |
train_input = bert_encode(train.text.values, tokenizer, max_len=160)
test_input = bert_encode(test.text.values, tokenizer, max_len=160)
train_labels = train.target.values<train_model>
|
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission.drop('Label', axis=1, inplace=True)
submission['Label'] = predictions
submission.to_csv('submission1.csv', index=False )
|
Digit Recognizer
|
6,925,490 |
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
train_history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=20,
batch_size=8,
callbacks=[callback]
)
model.save('model_bert.h5' )<predict_on_test>
|
extended_splitted_train_X = splitted_train_X[..., tf.newaxis]
extended_splitted_test_X = splitted_test_X[..., tf.newaxis]
extended_splitted_test_X.shape
|
Digit Recognizer
|
6,925,490 |
prediction= model.predict(test_input )<save_to_csv>
|
model_sol_2 = Sequential()
model_sol_2.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', input_shape=extended_splitted_train_X.shape[1:]))
model_sol_2.add(MaxPooling2D(pool_size=2))
model_sol_2.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model_sol_2.add(MaxPooling2D(pool_size=2))
model_sol_2.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model_sol_2.add(MaxPooling2D(pool_size=2))
model_sol_2.add(Flatten())
model_sol_2.add(Dense(64))
model_sol_2.add(Activation('relu'))
model_sol_2.add(Dropout(0.2))
model_sol_2.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
6,925,490 |
submission['target'] = prediction.round().astype(int)
submission.to_csv('submission.csv', index=False )<train_model>
|
model_sol_2.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'] )
|
Digit Recognizer
|
6,925,490 |
train_history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=2,
batch_size=8
)
model.save('model_bert.h5' )<set_options>
|
score = model_sol_2.evaluate(extended_splitted_test_X, ohe_splitted_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %4f%%' % accuracy )
|
Digit Recognizer
|
6,925,490 |
py.init_notebook_mode(connected=True)
pio.templates.default = "plotly_dark"
pd.set_option('max_columns', 50)
<install_modules>
|
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)
hist_sol_2 = model_sol_2.fit(extended_splitted_train_X, ohe_splitted_train_y, batch_size=128,
epochs=10, callbacks=[checkpointer],
verbose=2, validation_data=(extended_splitted_test_X, ohe_splitted_test_y), shuffle=True )
|
Digit Recognizer
|
6,925,490 |
!pip install detectron2 -f \
https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.7/index.html
!pip install pytorch-pfn-extras timm<load_pretrained>
|
model_sol_2.load_weights('mnist.model.best.hdf5')
score = model_sol_2.evaluate(extended_splitted_test_X, ohe_splitted_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %.4f%%' % accuracy )
|
Digit Recognizer
|
6,925,490 |
def save_yaml(filepath: str, content: Any, width: int = 120):
    with open(filepath, "w") as f:
        yaml.dump(content, f, width=width)<init_hyperparams>
|
extended_test_x = test_x[..., tf.newaxis]
predictions = model_sol_2.predict(extended_test_x)
predictions = [np.argmax(x) for x in predictions]
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission.drop('Label', axis=1, inplace=True)
submission['Label'] = predictions
submission.to_csv('submission2.csv', index=False )
|
Digit Recognizer
|
6,925,490 |
@dataclass
class Flags:
    debug: bool = True
    outdir: str = "results/det"
    device: str = "cuda:0"
    imgdir_name: str = "vinbigdata-chest-xray-resized-png-256x256"
    seed: int = 111
    target_fold: int = 0
    label_smoothing: float = 0.0
    model_name: str = "resnet18"
    model_mode: str = "normal"
    epoch: int = 20
    batchsize: int = 8
    valid_batchsize: int = 16
    num_workers: int = 4
    snapshot_freq: int = 5
    ema_decay: float = 0.999
    scheduler_type: str = ""
    scheduler_kwargs: Dict[str, Any] = field(default_factory=lambda: {})
    scheduler_trigger: List[Union[int, str]] = field(default_factory=lambda: [1, "iteration"])
    aug_kwargs: Dict[str, Dict[str, Any]] = field(default_factory=lambda: {})
    mixup_prob: float = -1.0
    def update(self, param_dict: Dict) -> "Flags":
        for key, value in param_dict.items():
            if not hasattr(self, key):
                raise ValueError(f"[ERROR] Unexpected key for flag = {key}")
            setattr(self, key, value)
        return self<init_hyperparams>
|
image_augmentator = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.2,
zoom_range=0.1,
fill_mode='nearest')
batch_size = 32
train_batches = image_augmentator.flow(extended_splitted_train_X, ohe_splitted_train_y, batch_size=batch_size)
val_batches = image_augmentator.flow(extended_splitted_test_X, ohe_splitted_test_y, batch_size=batch_size )
|
Digit Recognizer
|
6,925,490 |
flags_dict = {
"debug": False,
"outdir": "results/tmp_debug",
"imgdir_name": "vinbigdata-chest-xray-resized-png-256x256",
"model_name": "resnet18",
"num_workers": 4,
"epoch": 15,
"batchsize": 8,
"scheduler_type": "CosineAnnealingWarmRestarts",
"scheduler_kwargs": {"T_0": 28125},
"scheduler_trigger": [1, "iteration"],
"aug_kwargs": {
"HorizontalFlip": {"p": 0.5},
"ShiftScaleRotate": {"scale_limit": 0.15, "rotate_limit": 10, "p": 0.5},
"RandomBrightnessContrast": {"p": 0.5},
"CoarseDropout": {"max_holes": 8, "max_height": 25, "max_width": 25, "p": 0.5},
"Blur": {"blur_limit": [3, 7], "p": 0.5},
"Downscale": {"scale_min": 0.25, "scale_max": 0.9, "p": 0.3},
"RandomGamma": {"gamma_limit": [80, 120], "p": 0.6},
}
}<load_from_csv>
|
model_sol_3 = Sequential()
model_sol_3.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', input_shape=extended_splitted_train_X.shape[1:]))
model_sol_3.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
model_sol_3.add(MaxPooling2D(pool_size=2))
model_sol_3.add(Dropout(0.1))
model_sol_3.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
model_sol_3.add(MaxPooling2D(pool_size=2))
model_sol_3.add(Flatten())
model_sol_3.add(Dense(64))
model_sol_3.add(Activation('relu'))
model_sol_3.add(Dropout(0.2))
model_sol_3.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
6,925,490 |
print("torch", torch.__version__)
flags = Flags().update(flags_dict)
print("flags", flags)
debug = flags.debug
outdir = Path(flags.outdir)
os.makedirs(str(outdir), exist_ok=True)
flags_dict = dataclasses.asdict(flags)
save_yaml(str(outdir / "flags.yaml"), flags_dict)
inputdir = Path("/kaggle/input")
datadir = inputdir / "vinbigdata-chest-xray-abnormalities-detection"
imgdir = inputdir / flags.imgdir_name
train = pd.read_csv(datadir / "train.csv")
<groupby>
|
model_sol_3.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'] )
|
Digit Recognizer
|
6,925,490 |
is_normal_df = train.groupby("image_id")["class_id"].agg(lambda s: (s == 14).sum()).reset_index().rename({"class_id": "num_normal_annotations"}, axis=1)
is_normal_df.head()<categorify>
|
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)
hist_sol_3 = model_sol_3.fit_generator(generator=train_batches, steps_per_epoch =extended_splitted_train_X.shape[0] // batch_size,
epochs=32, callbacks=[checkpointer],
validation_data=val_batches, validation_steps=extended_splitted_test_X.shape[0] // batch_size,
verbose=2 )
|
Digit Recognizer
|
6,925,490 |
num_normal_anno_counts_df = num_normal_anno_counts.reset_index()
num_normal_anno_counts_df["name"] = num_normal_anno_counts_df["index"].map({0: "Abnormal", 3: "Normal"})
num_normal_anno_counts_df<define_variables>
|
model_sol_3.load_weights('mnist.model.best.hdf5')
score = model_sol_3.evaluate(extended_splitted_test_X, ohe_splitted_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %.4f%%' % accuracy )
|
Digit Recognizer
|
6,925,490 |
def get_vinbigdata_dicts(
    imgdir: Path,
    train_df: pd.DataFrame,
    train_data_type: str = "original",
    use_cache: bool = True,
    debug: bool = True,
    target_indices: Optional[np.ndarray] = None,
):
    debug_str = f"_debug{int(debug)}"
    train_data_type_str = f"_{train_data_type}"
    cache_path = Path(".") / f"dataset_dicts_cache{train_data_type_str}{debug_str}.pkl"
    if not use_cache or not cache_path.exists():
        print("Creating data...")
        train_meta = pd.read_csv(imgdir / "train_meta.csv")
        if debug:
            train_meta = train_meta.iloc[:500]
        image_id = train_meta.loc[0, "image_id"]
        image_path = str(imgdir / "train" / f"{image_id}.png")
        image = cv2.imread(image_path)
        resized_height, resized_width, ch = image.shape
        print(f"image shape: {image.shape}")
        dataset_dicts = []
        for index, train_meta_row in tqdm(train_meta.iterrows(), total=len(train_meta)):
            record = {}
            image_id, height, width = train_meta_row.values
            filename = str(imgdir / "train" / f"{image_id}.png")
            record["file_name"] = filename
            record["image_id"] = image_id
            record["height"] = resized_height
            record["width"] = resized_width
            objs = []
            for index2, row in train_df.query("image_id == @image_id").iterrows():
                class_id = row["class_id"]
                if class_id == 14:
                    pass
                else:
                    h_ratio = resized_height / height
                    w_ratio = resized_width / width
                    bbox_resized = [
                        int(row["x_min"]) * w_ratio,
                        int(row["y_min"]) * h_ratio,
                        int(row["x_max"]) * w_ratio,
                        int(row["y_max"]) * h_ratio,
                    ]
                    obj = {
                        "bbox": bbox_resized,
                        "bbox_mode": BoxMode.XYXY_ABS,
                        "category_id": class_id,
                    }
                    objs.append(obj)
            record["annotations"] = objs
            dataset_dicts.append(record)
        with open(cache_path, mode="wb") as f:
            pickle.dump(dataset_dicts, f)
    print(f"Load from cache {cache_path}")
    with open(cache_path, mode="rb") as f:
        dataset_dicts = pickle.load(f)
    if target_indices is not None:
        dataset_dicts = [dataset_dicts[i] for i in target_indices]
    return dataset_dicts
def get_vinbigdata_dicts_test(
    imgdir: Path, test_meta: pd.DataFrame, use_cache: bool = True, debug: bool = True,
):
    debug_str = f"_debug{int(debug)}"
    cache_path = Path(".") / f"dataset_dicts_cache_test{debug_str}.pkl"
    if not use_cache or not cache_path.exists():
        print("Creating data...")
        if debug:
            test_meta = test_meta.iloc[:500]
        image_id = test_meta.loc[0, "image_id"]
        image_path = str(imgdir / "test" / f"{image_id}.png")
        image = cv2.imread(image_path)
        resized_height, resized_width, ch = image.shape
        print(f"image shape: {image.shape}")
        dataset_dicts = []
        for index, test_meta_row in tqdm(test_meta.iterrows(), total=len(test_meta)):
            record = {}
            image_id, height, width = test_meta_row.values
            filename = str(imgdir / "test" / f"{image_id}.png")
            record["file_name"] = filename
            record["image_id"] = image_id
            record["height"] = resized_height
            record["width"] = resized_width
            dataset_dicts.append(record)
        with open(cache_path, mode="wb") as f:
            pickle.dump(dataset_dicts, f)
    print(f"Load from cache {cache_path}")
    with open(cache_path, mode="rb") as f:
        dataset_dicts = pickle.load(f)
    return dataset_dicts
<categorify>
|
extended_test_x = test_x[..., tf.newaxis]
predictions = model_sol_3.predict(extended_test_x)
predictions = [np.argmax(x) for x in predictions]
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission.drop('Label', axis=1, inplace=True)
submission['Label'] = predictions
submission.to_csv('submission3.csv', index=False )
|
Digit Recognizer
|
6,925,490 |
class DatasetMixin(Dataset):
    def __init__(self, transform=None):
        self.transform = transform
    def __getitem__(self, index):
        if torch.is_tensor(index):
            index = index.tolist()
        if isinstance(index, slice):
            current, stop, step = index.indices(len(self))
            return [self.get_example_wrapper(i) for i in
                    six.moves.range(current, stop, step)]
        elif isinstance(index, list) or isinstance(index, numpy.ndarray):
            return [self.get_example_wrapper(i) for i in index]
        else:
            return self.get_example_wrapper(index)
    def __len__(self):
        raise NotImplementedError
    def get_example_wrapper(self, i):
        example = self.get_example(i)
        if self.transform:
            example = self.transform(example)
        return example
    def get_example(self, i):
        raise NotImplementedError
<categorify>
|
model_sol_4_1 = Sequential()
model_sol_4_1.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', input_shape=extended_splitted_train_X.shape[1:]))
model_sol_4_1.add(BatchNormalization())
model_sol_4_1.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
model_sol_4_1.add(MaxPooling2D(pool_size=2))
model_sol_4_1.add(Dropout(0.1))
model_sol_4_1.add(BatchNormalization())
model_sol_4_1.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
model_sol_4_1.add(MaxPooling2D(pool_size=2))
model_sol_4_1.add(Flatten())
model_sol_4_1.add(BatchNormalization())
model_sol_4_1.add(Dense(64))
model_sol_4_1.add(Activation('relu'))
model_sol_4_1.add(Dropout(0.2))
model_sol_4_1.add(BatchNormalization())
model_sol_4_1.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
6,925,490 |
class VinbigdataTwoClassDataset(DatasetMixin):
    def __init__(self, dataset_dicts, image_transform=None, transform=None, train: bool = True,
                 mixup_prob: float = -1.0, label_smoothing: float = 0.0):
        super(VinbigdataTwoClassDataset, self).__init__(transform=transform)
        self.dataset_dicts = dataset_dicts
        self.image_transform = image_transform
        self.train = train
        self.mixup_prob = mixup_prob
        self.label_smoothing = label_smoothing
    def _get_single_example(self, i):
        d = self.dataset_dicts[i]
        filename = d["file_name"]
        img = cv2.imread(filename)
        if self.image_transform:
            img = self.image_transform(img)
        img = torch.tensor(np.transpose(img, (2, 0, 1)).astype(np.float32))
        if self.train:
            label = int(len(d["annotations"]) > 0)
            if self.label_smoothing > 0:
                if label == 0:
                    return img, float(label) + self.label_smoothing
                else:
                    return img, float(label) - self.label_smoothing
            else:
                return img, float(label)
        else:
            return img, None
    def get_example(self, i):
        img, label = self._get_single_example(i)
        if self.mixup_prob > 0. and np.random.uniform() < self.mixup_prob:
            j = np.random.randint(0, len(self.dataset_dicts))
            p = np.random.uniform()
            img2, label2 = self._get_single_example(j)
            img = img * p + img2 * (1 - p)
            if self.train:
                label = label * p + label2 * (1 - p)
        if self.train:
            label_logit = torch.tensor([1 - label, label], dtype=torch.float32)
            return img, label_logit
        else:
            return img
    def __len__(self):
        return len(self.dataset_dicts)<create_dataframe>
|
model_sol_4_1.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'] )
|
Digit Recognizer
|
6,925,490 |
dataset_dicts = get_vinbigdata_dicts(imgdir, train, debug=debug)
dataset = VinbigdataTwoClassDataset(dataset_dicts )<normalization>
|
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)
hist_sol_4 = model_sol_4_1.fit_generator(generator=train_batches, steps_per_epoch =extended_splitted_train_X.shape[0] // batch_size,
epochs=32, callbacks=[checkpointer],
validation_data=val_batches, validation_steps=extended_splitted_test_X.shape[0] // batch_size,
verbose=2 )
|
Digit Recognizer
|
6,925,490 |
class Transform:
    def __init__(
        self, hflip_prob: float = 0.5, ssr_prob: float = 0.5, random_bc_prob: float = 0.5
    ):
        self.transform = A.Compose(
            [
                A.HorizontalFlip(p=hflip_prob),
                A.ShiftScaleRotate(
                    shift_limit=0.0625, scale_limit=0.1, rotate_limit=10, p=ssr_prob
                ),
                A.RandomBrightnessContrast(p=random_bc_prob),
            ]
        )
    def __call__(self, image):
        image = self.transform(image=image)["image"]
        return image
<create_dataframe>
|
model_sol_4_1.load_weights('mnist.model.best.hdf5')
score = model_sol_4_1.evaluate(extended_splitted_test_X, ohe_splitted_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %.4f%%' % accuracy )
|
Digit Recognizer
|
6,925,490 |
aug_dataset = VinbigdataTwoClassDataset(dataset_dicts, image_transform=Transform() )<categorify>
|
model_sol_4_2 = Sequential()
model_sol_4_2.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', input_shape=extended_splitted_train_X.shape[1:]))
model_sol_4_2.add(BatchNormalization())
model_sol_4_2.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model_sol_4_2.add(MaxPooling2D(pool_size=2))
model_sol_4_2.add(Dropout(0.1))
model_sol_4_2.add(BatchNormalization())
model_sol_4_2.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model_sol_4_2.add(MaxPooling2D(pool_size=2))
model_sol_4_2.add(Flatten())
model_sol_4_2.add(BatchNormalization())
model_sol_4_2.add(Dense(64))
model_sol_4_2.add(Activation('relu'))
model_sol_4_2.add(Dropout(0.2))
model_sol_4_2.add(BatchNormalization())
model_sol_4_2.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
6,925,490 |
class Transform:
    def __init__(self, aug_kwargs: Dict):
        self.transform = A.Compose(
            [getattr(A, name)(**kwargs) for name, kwargs in aug_kwargs.items()]
        )
    def __call__(self, image):
        image = self.transform(image=image)["image"]
        return image<init_hyperparams>
|
model_sol_4_2.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'] )
|
Digit Recognizer
|
6,925,490 |
class CNNFixedPredictor(nn.Module):
    def __init__(self, cnn: nn.Module, num_classes: int = 2):
        super(CNNFixedPredictor, self).__init__()
        self.cnn = cnn
        self.lin = Linear(cnn.num_features, num_classes)
        print("cnn.num_features", cnn.num_features)
        for param in self.cnn.parameters():
            param.requires_grad = False
    def forward(self, x):
        feat = self.cnn(x)
        return self.lin(feat)
<choose_model_class>
|
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)
hist_sol_4 = model_sol_4_2.fit_generator(generator=train_batches, steps_per_epoch=extended_splitted_train_X.shape[0] // batch_size,
epochs=32, callbacks=[checkpointer],
validation_data=val_batches, validation_steps=extended_splitted_test_X.shape[0] // batch_size,
verbose=2 )
|
Digit Recognizer
|
6,925,490 |
def build_predictor(model_name: str, model_mode: str = "normal"):
    if model_mode == "normal":
        return timm.create_model(model_name, pretrained=True, num_classes=2, in_chans=3)
    elif model_mode == "cnn_fixed":
        timm_model = timm.create_model(model_name, pretrained=True, num_classes=0, in_chans=3)
        return CNNFixedPredictor(timm_model, num_classes=2)
    else:
        raise ValueError(f"[ERROR] Unexpected value model_mode={model_mode}")
<compute_test_metric>
|
model_sol_4_2.load_weights('mnist.model.best.hdf5')
score = model_sol_4_2.evaluate(extended_splitted_test_X, ohe_splitted_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %.4f%%' % accuracy )
|
Digit Recognizer
|
6,925,490 |
def accuracy(y: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    assert y.shape[:-1] == t.shape, f"y {y.shape}, t {t.shape} is inconsistent."
    pred_label = torch.max(y.detach(), dim=-1)[1]
    count = t.nelement()
    correct = (pred_label == t).sum().float()
    acc = correct / count
    return acc
def accuracy_with_logits(y: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    assert y.shape == t.shape
    gt_label = torch.max(t.detach(), dim=-1)[1]
    return accuracy(y, gt_label)<compute_train_metric>
|
extended_test_x = test_x[..., tf.newaxis]
predictions = model_sol_4_2.predict(extended_test_x)
predictions = [np.argmax(x) for x in predictions]
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission.drop('Label', axis=1, inplace=True)
submission['Label'] = predictions
submission.to_csv('submission4.csv', index=False )
|
Digit Recognizer
|
6,925,490 |
def cross_entropy_with_logits(input, target, dim=-1):
    loss = torch.sum(-target * F.log_softmax(input, dim), dim)
    return loss.mean()
<find_best_params>
|
train_y_sol5 = mnist_train_complete.iloc[:, 0].values.astype('int32')
train_x_sol5 = mnist_train_complete.iloc[:, 1:].values.astype('float32')
test_x_sol5 = mnist_test_complete.values.astype('float32')
train_x_sol5 = train_x_sol5.reshape(train_x_sol5.shape[0], 28, 28)
test_x_sol5 = test_x_sol5.reshape(test_x_sol5.shape[0], 28, 28)
train_x_sol5 = train_x_sol5[..., tf.newaxis]
test_x_sol5 = test_x_sol5[..., tf.newaxis]
|
Digit Recognizer
|