kernel_id (int64, 24.2k to 23.3M) | prompt (string, length 8 to 1.85M) | completion (string, length 1 to 182k) | comp_name (string, length 5 to 57)
---|---|---|---|
7,429,783 |
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if not os.path.exists("results"):
os.mkdir("results")
TRAINING = True
read_path = '/kaggle/input/jane-street-market-prediction/train.csv'
save_path = os.path.join("results", str(seed))
device = torch.device("cuda:0")
if not os.path.exists(save_path):
os.mkdir(save_path)
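For stricter run-to-run reproducibility, the cudnn benchmark autotuner is usually disabled alongside the deterministic flag set above; this one extra line is an addition, not part of the original cell.
torch.backends.cudnn.benchmark = False  # optional extra; pairs with cudnn.deterministic above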
<define_variables>
|
results=classifier.predict_classes(df_test)
print(results)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission.csv",index=False,header=True )
|
Digit Recognizer
|
7,429,783 |
TRAINING = True<data_type_conversions>
|
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(X_train.shape)
print(X_test.shape)
print(type(X_train))
img=X_train[0]
plt.imshow(img,cmap='gray')
print(type(img))
|
Digit Recognizer
|
7,429,783 |
train = pd.read_csv(read_path)
features = [c for c in train.columns if 'feature' in c]
f_mean = train[features].mean()
train = train.loc[train.weight > 0].reset_index(drop = True)
train[features] = train[features].fillna(f_mean)
train = train.astype("float32")
train['action'] =(train['resp'] > 0 ).astype('int')
train['action1'] =(train['resp_1'] > 0 ).astype('int')
train['action2'] =(train['resp_2'] > 0 ).astype('int')
train['action3'] =(train['resp_3'] > 0 ).astype('int')
train['action4'] =(train['resp_4'] > 0 ).astype('int')
# targets = ['resp']  # regression target, immediately overridden by the classification targets below
targets = ['action', 'action1', 'action2', 'action3', 'action4']
def add_features(df, features):
new_features = copy.deepcopy(features)
df["cross_1_2"] = df["feature_1"] /(df["feature_2"] + 1e-5)
df["cross_41_42_43"] = df["feature_41"] + df["feature_42"] + df["feature_43"]
new_features.extend(["cross_1_2", "cross_41_42_43"])
return df, new_features
train, train_features = add_features(train, features)
<filter>
|
X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
y_cat_train = to_categorical(y_train, 10)
y_cat_test = to_categorical(y_test, 10)
|
Digit Recognizer
|
7,429,783 |
test = train.loc[(train.date >= 450)&(train.date < 500)].reset_index(drop=True)
<data_type_conversions>
|
classifier.fit(X_train, y_cat_train, epochs=25)
|
Digit Recognizer
|
7,429,783 |
class MyDataset:
def __init__(self, df, features, targets):
self.features = df[features].values
self.labels = df[targets].values
self.weights = df['weight'].values
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
feat_ = torch.tensor(self.features[idx], dtype=torch.float)
label_ = torch.tensor(self.labels[idx], dtype=torch.float)
weight_ = torch.tensor(self.weights[idx], dtype=torch.float)
return feat_, label_, weight_<choose_model_class>
|
results=classifier.predict_classes(df_test)
print(results)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission1.csv",index=False,header=True )
|
Digit Recognizer
|
7,429,783 |
class Model(nn.Module):
def __init__(self, all_feat_cols, target_cols):
super(Model, self).__init__()
self.batch_norm0 = nn.BatchNorm1d(len(all_feat_cols))
self.dropout0 = nn.Dropout(0.1)
dropout_rate = 0.1
hidden_size = 256
self.dense1 = nn.Linear(len(all_feat_cols), hidden_size)
self.batch_norm1 = nn.BatchNorm1d(hidden_size)
self.dropout1 = nn.Dropout(dropout_rate)
self.dense2 = nn.Linear(hidden_size+len(all_feat_cols), hidden_size)
self.batch_norm2 = nn.BatchNorm1d(hidden_size)
self.dropout2 = nn.Dropout(dropout_rate)
self.dense3 = nn.Linear(hidden_size+hidden_size, hidden_size)
self.batch_norm3 = nn.BatchNorm1d(hidden_size)
self.dropout3 = nn.Dropout(dropout_rate)
self.dense4 = nn.Linear(hidden_size+hidden_size, hidden_size)
self.batch_norm4 = nn.BatchNorm1d(hidden_size)
self.dropout4 = nn.Dropout(dropout_rate)
self.dense5 = nn.Linear(2 * hidden_size, 128)
self.batch_norm5 = nn.BatchNorm1d(128)
self.dropout5 = nn.Dropout(dropout_rate)
self.dense6 = nn.Linear(128, 128)
self.batch_norm6 = nn.BatchNorm1d(128)
self.dropout6 = nn.Dropout(dropout_rate)
self.dense7 = nn.Linear(128, len(target_cols))
self.Relu = nn.ReLU(inplace=True)
self.PReLU = nn.PReLU()
self.LeakyReLU = nn.LeakyReLU(negative_slope=0.01, inplace=True)
self.RReLU = nn.RReLU()
def forward(self, x):
x = self.batch_norm0(x)
x = self.dropout0(x)
x1 = self.dense1(x)
x1 = self.batch_norm1(x1)
x1 = self.RReLU(x1)
x1 = self.dropout1(x1)
x = torch.cat([x, x1], 1)
x2 = self.dense2(x)
x2 = self.batch_norm2(x2)
x2 = self.RReLU(x2)
x2 = self.dropout2(x2)
x = torch.cat([x1, x2], 1)
x3 = self.dense3(x)
x3 = self.batch_norm3(x3)
x3 = self.RReLU(x3)
x3 = self.dropout3(x3)
x = torch.cat([x2, x3], 1)
x4 = self.dense4(x)
x4 = self.batch_norm4(x4)
x4 = self.RReLU(x4)
x4 = self.dropout4(x4)
x = torch.cat([x3, x4], 1)
x = self.dense5(x)
x = self.batch_norm5(x)
x = self.RReLU(x)
x = self.dropout5(x)
x = self.dense6(x)
x = self.batch_norm6(x)
x = self.RReLU(x)
x = self.dropout6(x)
x = self.dense7(x)
return x
class SmoothBCEwLogits(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth(targets: torch.Tensor, n_labels: int, smoothing=0.0):
    assert 0 <= smoothing < 1
    with torch.no_grad():
        targets = targets * (1.0 - smoothing) + 0.5 * smoothing
    return targets

def forward(self, inputs, targets, weights=None):
    targets = SmoothBCEwLogits._smooth(targets, inputs.size(-1), self.smoothing)
    loss = F.binary_cross_entropy_with_logits(inputs, targets, weights)
    if self.reduction == 'sum':
        loss = loss.sum()
    elif self.reduction == 'mean':
        loss = loss.mean()
    return loss
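A quick sanity check of the smoothing rule in _smooth (illustrative snippet, not from the original kernel): hard labels {0, 1} map to {s/2, 1 - s/2}.
import torch
t = torch.tensor([0., 1.])
s = 0.01
print(t * (1 - s) + 0.5 * s)  # tensor([0.0050, 0.9950])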
class EarlyStopping:
def __init__(self, patience=7, mode="max", delta=0.):
self.patience = patience
self.counter = 0
self.mode = mode
self.best_score = None
self.early_stop = False
self.delta = delta
if self.mode == "min":
self.val_score = np.Inf
else:
self.val_score = -np.Inf
def __call__(self, epoch_score, model, model_path):
if self.mode == "min":
score = -1.0 * epoch_score
else:
score = np.copy(epoch_score)
if self.best_score is None:
self.best_score = score
self.save_checkpoint(epoch_score, model, model_path)
elif score < self.best_score:
self.counter += 1
print('EarlyStopping counter: {} out of {}'.format(self.counter, self.patience))
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(epoch_score, model, model_path)
self.counter = 0
def save_checkpoint(self, epoch_score, model, model_path):
    # NaN can't be caught by list membership (NaN != NaN); use np.isnan explicitly
    if not np.isnan(epoch_score) and epoch_score not in [-np.inf, np.inf]:
        print('Validation score improved. Saving model!')
        torch.save(model.state_dict(), model_path)
    self.val_score = epoch_score<train_model>
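The training loop that drives EarlyStopping is not part of this excerpt; a minimal sketch of the usual wiring, where train_one_epoch and evaluate are hypothetical placeholders rather than functions from the original notebook:
es = EarlyStopping(patience=7, mode="max")
for epoch in range(100):
    train_one_epoch(model, train_loader, optimizer, device)  # hypothetical helper
    val_auc = evaluate(model, valid_loader, device)          # hypothetical helper
    es(val_auc, model, model_path="JSModel_0.pth")           # hypothetical checkpoint path
    if es.early_stop:
        break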
|
classifier.fit(X_test,y_cat_test,epochs=25 )
|
Digit Recognizer
|
7,429,783 |
<split><EOS>
|
results=classifier.predict_classes(df_test)
print(results)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission2.csv",index=False,header=True )
|
Digit Recognizer
|
4,208,051 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<find_best_params>
|
print('Working on "%s"' % Path('.').absolute())
|
Digit Recognizer
|
4,208,051 |
if not TRAINING:
models = []
for i in [0, 1, 2, 3, 4]:
torch.cuda.empty_cache()
device = torch.device("cuda:0")
model = Model(train_features, targets)
model.to(device)
model.eval()
ckp_path = f'/kaggle/input/skeleton-with-pytorch/JSModel_{i}.pth'
model.load_state_dict(torch.load(ckp_path))
models.append(model )<find_best_params>
|
class NumpyImageList(ImageList):
def open(self, fn):
img = fn.reshape(28,28,1)
return Image(pil2tensor(img, dtype=np.float32))
@classmethod
def from_csv(cls, path:PathOrStr, csv:str, **kwargs)->'ItemList':
df = pd.read_csv(Path(path)/csv, header='infer')
res = super().from_df(df, path=path, cols=0, **kwargs)
if 'label' in df.columns:
df = df.drop('label', axis=1)
df = np.array(df)/255.
res.items =(df-df.mean())/df.std()
return res
|
Digit Recognizer
|
4,208,051 |
if TRAINING:
models = []
for i in [1, 3]:
torch.cuda.empty_cache()
device = torch.device("cuda:0")
model = Model(train_features, targets)
model.to(device)
model.eval()
ckp_path = f'./JSModel_{i}.pth'
model.load_state_dict(torch.load(ckp_path))
models.append(model )<categorify>
|
test = NumpyImageList.from_csv('../input/', 'test.csv')
test
|
Digit Recognizer
|
4,208,051 |
models = [models[1], models[3], models[2]]
batch_size = 4096
label_smoothing = 1e-2
loss_fn = SmoothBCEwLogits(smoothing=label_smoothing)
test_pred = np.zeros((len(test), len(targets)))
test_set = MyDataset(test, train_features, targets)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4)
for id_, model in enumerate(models):
    test_pred_, _, __ = inference_fn(model, test_loader, device)
    test_pred += test_pred_ / len(models)
    test_pred_ = np.median(test_pred_, axis=1)
    test_pred_ = np.where(test_pred_ >= 0.50, 1, 0).astype(int)
    auc_score = roc_auc_score(test['action'].values.reshape(-1, 1), test_pred_)
    u_score = utility_score(date=test.date.values.reshape(-1),
                            weight=test.weight.values.reshape(-1),
                            resp=test.resp.values.reshape(-1),
                            action=test_pred_.reshape(-1))
    print("model", id_, "auc:", auc_score, "u_score:", u_score)
test_preds = np.median(test_pred, axis=1)
test_preds = np.where(test_preds >= 0.50, 1, 0).astype(int)
auc_score = roc_auc_score(test['action'].values.reshape(-1, 1), test_preds)
u_score = utility_score(date=test.date.values.reshape(-1),
                        weight=test.weight.values.reshape(-1),
                        resp=test.resp.values.reshape(-1),
                        action=test_preds.reshape(-1))
print(auc_score, u_score)<statistical_test>
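utility_score and inference_fn are called above but defined elsewhere in the kernel. A sketch of plausible implementations, assuming the competition's published utility: p_i is the per-date sum of weight * resp * action, t = sum(p_i) / sqrt(sum(p_i^2)) * sqrt(250 / |i|), and u = min(max(t, 0), 6) * sum(p_i); the inference loop is assumed to return sigmoid probabilities.
import numpy as np
import torch

def utility_score(date, weight, resp, action):
    # p_i: per-date P&L, accumulated with bincount over integer date ids
    date = date.astype(int)
    Pi = np.bincount(date, weight * resp * action)
    count_i = len(np.unique(date))
    t = np.sum(Pi) / np.sqrt(np.sum(Pi ** 2)) * np.sqrt(250 / count_i)
    return np.clip(t, 0, 6) * np.sum(Pi)

def inference_fn(model, dataloader, device):
    # returns sigmoid probabilities plus pass-through labels and weights
    model.eval()
    preds, labels, weights = [], [], []
    with torch.no_grad():
        for feat, label, weight in dataloader:
            out = model(feat.to(device))
            preds.append(out.sigmoid().cpu().numpy())
            labels.append(label.numpy())
            weights.append(weight.numpy())
    return np.concatenate(preds), np.concatenate(labels), np.concatenate(weights)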
|
tfms = get_transforms(do_flip=False)
data = (NumpyImageList.from_csv('../input/', 'train.csv')
        .split_by_rand_pct(.1)
        .label_from_df(cols='label')
        .add_test(test, label=0)
        .transform(tfms)
        .databunch(bs=128, num_workers=0)
        .normalize(imagenet_stats))
data
|
Digit Recognizer
|
4,208,051 |
env = janestreet.make_env()
env_iter = env.iter_test()
th = 0.5
for (test_df, pred_df) in tqdm(env_iter):
    if test_df['weight'].item() > 0:
        x_tt = test_df.loc[:, features].values
        if np.isnan(x_tt.sum()):
            x_tt = np.nan_to_num(x_tt) + np.isnan(x_tt) * f_mean.values.reshape(1, -1)
        feature_inp = pd.DataFrame(x_tt)
        feature_inp.columns = features
        feature_inp, _ = add_features(feature_inp, features)
        feature_inp = torch.tensor(feature_inp.values, dtype=torch.float).to(device)
        pred = np.zeros((1, len(targets)))
        for model in models:
            pred += model(feature_inp).detach().cpu().numpy()
        pred /= len(models)
        pred = pred.mean(axis=1).item()
        # the models output logits, so thresholding at 0 corresponds to prob 0.5
        pred_df.action = int(pred >= 0)
    else:
        pred_df.action = 0
    env.predict(pred_df)<import_modules>
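The NaN fill inside the loop relies on a small identity worth spelling out (toy example, not from the kernel): nan_to_num zeroes the missing entries, and isnan(x) * f_mean adds the feature mean back exactly where the NaNs were.
import numpy as np
x = np.array([[1.0, np.nan, 3.0]])
m = np.array([[10.0, 20.0, 30.0]])
print(np.nan_to_num(x) + np.isnan(x) * m)  # [[ 1. 20.  3.]]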
|
data.show_batch(rows=5, figsize=(10,10))
|
Digit Recognizer
|
4,208,051 |
import os
import time
import pickle
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.metrics import log_loss, roc_auc_score
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
<import_modules>
|
dropout = 0.25
model = nn.Sequential(
    nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5),
    nn.ReLU(),
    nn.BatchNorm2d(32),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(dropout),
    nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
    nn.ReLU(),
    nn.BatchNorm2d(64),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Dropout(dropout),
    nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3),
    nn.ReLU(),
    Flatten(),
    nn.Linear(64 * 3 * 3, 256),
    nn.ReLU(),
    nn.Dropout(dropout * 1.5),
    nn.BatchNorm1d(256),
    nn.Linear(256, 10),
)
if torch.cuda.is_available():
    model = model.cuda()
learn = Learner(data, model, metrics=accuracy, model_dir='/kaggle/working/models')
learn.summary()
|
Digit Recognizer
|
4,208,051 |
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Dropout, Concatenate, Lambda, GaussianNoise, Activation
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import tensorflow as tf
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
import tensorflow_addons as tfa
from random import choices
from numba import njit
import matplotlib.pyplot as plt
import os,gc
import random
from random import choices
from sklearn import datasets
from tensorflow.python.framework import ops<prepare_x_and_y>
|
learn.fit_one_cycle(20, max_lr=slice(1e-1))
|
Digit Recognizer
|
4,208,051 |
SEED = 1111
tf.random.set_seed(SEED)
np.random.seed(SEED)
train = pd.read_csv('/kaggle/input/jane-street-market-prediction/train.csv')
train = train.query('date > 85').reset_index(drop=True)
train = train[train['weight'] != 0]
features_mean = []
features = [c for c in train.columns if 'feature' in c]
for i in features:
    x = train[i].mean()
    features_mean.append(x)
    train[i] = train[i].fillna(x)
train['action'] = ((train['resp'].values) > 0).astype(int)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
X_train = train.loc[:, train.columns.str.contains('feature')]
y_train = np.stack([(train[c] > 0).astype('int') for c in resp_cols]).T<define_search_space>
|
learn.save('stage1' )
|
Digit Recognizer
|
4,208,051 |
epochs = 200
batch_size = 4096
hidden_units = [160, 160, 160]
dropout_rates = [0.20, 0.20, 0.20, 0.20]
label_smoothing = 1e-2
learning_rate = 1e-3<choose_model_class>
|
learn.fit_one_cycle(5, max_lr=slice(1e-2))
|
Digit Recognizer
|
4,208,051 |
def create_mlp(num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate):
    inp = tf.keras.layers.Input(shape=(num_columns,))
    x = tf.keras.layers.BatchNormalization()(inp)
    x = tf.keras.layers.Dropout(dropout_rates[0])(x)
    for i in range(len(hidden_units)):
        x = tf.keras.layers.Dense(hidden_units[i])(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation(tf.keras.activations.swish)(x)
        x = tf.keras.layers.Dropout(dropout_rates[i + 1])(x)
    x = tf.keras.layers.Dense(num_labels)(x)
    out = tf.keras.layers.Activation("sigmoid")(x)
    model = tf.keras.models.Model(inputs=inp, outputs=out)
    model.compile(
        optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate),
        loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
        metrics=tf.keras.metrics.AUC(name='AUC'),
    )
    return model<train_model>
|
learn.save('stage2' )
|
Digit Recognizer
|
4,208,051 |
tf.keras.backend.clear_session()
tf.random.set_seed(SEED)
clf = create_mlp(
len(features), 5, hidden_units, dropout_rates, label_smoothing, learning_rate
)
clf.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=2 )<feature_engineering>
|
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1)
submission_df = pd.DataFrame({'ImageId': list(range(1,len(labels)+1)) , 'Label': labels})
submission_df.to_csv(f'submission.csv', index=False )
|
Digit Recognizer
|
5,844,541 |
models = []
models.append(clf)
th = 0.503
f = np.median
f_mean = np.mean(train[features[1:]].values,axis=0)
env = janestreet.make_env()
for (test_df, pred_df) in tqdm(env.iter_test()):
    if test_df['weight'].item() > 0:
        x_tt = test_df.loc[:, features].values
        if np.isnan(x_tt[:, 1:].sum()):
            x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:]) + np.isnan(x_tt[:, 1:]) * f_mean
        pred = np.mean([model(x_tt, training=False).numpy() for model in models], axis=0)
        pred = f(pred)
        pred_df.action = np.where(pred >= th, 1, 0).astype(int)
    else:
        pred_df.action = 0
    env.predict(pred_df)<load_from_csv>
|
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep' )
|
Digit Recognizer
|
5,844,541 |
df = dtable.fread('/kaggle/input/jane-street-market-prediction/train.csv').to_pandas()
df = df.query('date > 85').reset_index(drop=True)
df = df[df.weight > 0]
df.reset_index(inplace=True, drop=True)
df = df.astype({c: np.float32 for c in df.select_dtypes(include='float64').columns})
df_labels = df[['date', 'weight', 'resp_1', 'resp_2', 'resp_3', 'resp_4', 'resp']]
features = [c for c in df.columns if 'feature' in c]
df = df.drop(df_labels.columns, axis=1)
df.fillna(df.mean(), inplace=True)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp_4', 'resp']
X = df[features]
y = np.stack([(df_labels[c] > 0).astype('int') for c in resp_cols]).T
f_mean = np.mean(df[features[1:]].values, axis=0)
device = 'cuda' if t.cuda.is_available() else 'cpu'<choose_model_class>
|
train=pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test=pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
5,844,541 |
class Model(nn.Module):
    def __init__(self, input_size):
        super(Model, self).__init__()
        hs = 256
        self.batch_norm0 = nn.BatchNorm1d(input_size)
        self.layer1 = LinBnDrop(input_size, hs, bn=True, p=0, act=Mish(), lin_first=False)
        self.layer2 = LinBnDrop(hs + input_size, hs, bn=True, p=0.2289, act=Mish(), lin_first=False)
        self.layer3 = LinBnDrop(hs + hs, hs, bn=True, p=0.2289, act=Mish(), lin_first=False)
        self.layer4 = LinBnDrop(hs + hs, hs, bn=True, p=0.2, act=Mish(), lin_first=False)
        self.dense5 = nn.Linear(hs + hs, 5)
def forward(self, x):
x = self.batch_norm0(x)
x1 = self.layer1(x)
x = torch.cat([x, x1], 1)
x2 = self.layer2(x)
x = torch.cat([x1, x2], 1)
x3 = self.layer3(x)
x = torch.cat([x2, x3], 1)
x4 = self.layer4(x)
x = torch.cat([x3, x4], 1)
x = self.dense5(x)
return x<load_pretrained>
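Mish and LinBnDrop come from fastai. In case the library is unavailable, rough stand-ins look like the sketch below; this approximates fastai's behavior for lin_first=False and is not the library code.
import torch
import torch.nn as nn
import torch.nn.functional as F

class Mish(nn.Module):
    # Mish activation: x * tanh(softplus(x))
    def forward(self, x):
        return x * torch.tanh(F.softplus(x))

class LinBnDrop(nn.Sequential):
    # rough equivalent of fastai's LinBnDrop with lin_first=False:
    # BatchNorm -> Dropout -> Linear -> activation
    def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):
        layers = [nn.BatchNorm1d(n_in)] if bn else []
        if p:
            layers.append(nn.Dropout(p))
        layers.append(nn.Linear(n_in, n_out))
        if act is not None:
            layers.append(act)
        super().__init__(*layers)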
|
def load_data():
    path = '/kaggle/input/mnist-numpy/mnist.npz'
    f = np.load(path)
    x_train, y_train = f['x_train'], f['y_train']
    x_test, y_test = f['x_test'], f['y_test']
    f.close()
    return (x_train, y_train), (x_test, y_test)
|
Digit Recognizer
|
5,844,541 |
model_nn = Model(len(features))
model_nn = model_nn.to(device)
learn = Learner(None, model_nn, loss_func=1)
learn.load('/kaggle/input/roclossjs3/dense_model')
<categorify>
|
Y_train=train["label"]
X_train=train.drop("label",axis=1)
(x_train1,y_train1),(x_test1,y_test1)=load_data()
train1=np.concatenate([x_train1,x_test1],axis=0)
y_train1=np.concatenate([y_train1,y_test1],axis=0)
Y_train1=y_train1
X_train1=train1.reshape(-1,28*28 )
|
Digit Recognizer
|
5,844,541 |
@njit
def fillna_npwhere_njit(array, values):
if np.isnan(array.sum()):
array = np.where(np.isnan(array), values, array)
return array
def for_loop(method, matrix, values):
for i in range(matrix.shape[0]):
matrix[i] = method(matrix[i], values)
return matrix<find_best_params>
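A toy check of the two helpers above (illustrative only): each row's NaNs are replaced element-wise by the corresponding entry of values, and the first call also triggers numba's JIT compilation.
import numpy as np
mat = np.array([[1.0, np.nan], [np.nan, 4.0]])
means = np.array([10.0, 20.0])
print(for_loop(fillna_npwhere_njit, mat, means))
# [[ 1. 20.]
#  [10.  4.]]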
|
X_train=X_train/255.0
test=test/255.0
X_train1=X_train1/255.0
|
Digit Recognizer
|
5,844,541 |
%%time
%%capture
env = janestreet.make_env()
learn.model.eval()
preds = []
for (test_df, pred_df) in tqdm(env.iter_test()):
    if test_df['weight'].item() > 0:
        x_tt = test_df.loc[:, features].values
        x_tt[:, 1:] = for_loop(fillna_npwhere_njit, x_tt[:, 1:], f_mean)
        pred = learn.model(t.from_numpy(x_tt).to(device, t.float)).sigmoid()[0][-1].item()
        preds.append(pred)
        action = 1 if pred >= .5 else 0
        pred_df.action = action
    else:
        pred_df.action = 0
    env.predict(pred_df)<compute_test_metric>
|
X_train = np.concatenate(( X_train.values, X_train1))
Y_train = np.concatenate(( Y_train, Y_train1))
X_train=X_train.reshape(-1,28,28,1)
test=test.values.reshape(-1,28,28,1)
|
Digit Recognizer
|
5,844,541 |
preds = np.array(preds)
preds.mean(), preds.std(), sum(preds >= .5), sum(preds < .5)<import_modules>
|
Y_train = to_categorical(Y_train,num_classes=10)
|
Digit Recognizer
|
5,844,541 |
tf.__version__<define_variables>
|
random_seed=2
|
Digit Recognizer
|
5,844,541 |
SEED = 1111
tf.random.set_seed(SEED)
np.random.seed(SEED)
<load_from_csv>
|
X_train,X_val,Y_train,Y_val=train_test_split(X_train,Y_train,test_size=0.25,random_state=random_seed)
|
Digit Recognizer
|
5,844,541 |
%%time
train = pd.read_parquet('../input/step01-csv-parquet/dtrain.parquet')
train = train.query('date > 85').reset_index(drop=True)
train = train[train['weight'] != 0]
train['action'] = ((train['resp'].values) > 0).astype(int)
train.fillna(train.mean(), inplace=True)
raw_features = [c for c in train.columns if "feature" in c]
f_mean = np.mean(train[raw_features[1:]].values, axis=0)
np.save("f_mean_online.npy", f_mean)
train.shape<feature_engineering>
|
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='Same', activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation="softmax"))
|
Digit Recognizer
|
5,844,541 |
def stats_features(tmp_df):
tmp_df['feature_cross_41_42_43']=tmp_df['feature_41']+tmp_df['feature_42']+tmp_df['feature_43']
tmp_df['feature_cross_1_2']=tmp_df['feature_1']/(tmp_df['feature_2']+1e-5)
tmp_df.head()
return tmp_df
<feature_engineering>
|
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
Image("model.png" )
|
Digit Recognizer
|
5,844,541 |
train=stats_features(train)
train.head()<define_variables>
|
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
|
Digit Recognizer
|
5,844,541 |
features = [c for c in train.columns if "feature" in c]<prepare_x_and_y>
|
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
5,844,541 |
valid = train.loc[(train.date >= 450)&(train.date < 500)].reset_index(drop=True)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
X_train = train.loc[:, train.columns.str.contains('feature')]
y_train = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T
X_valid = valid.loc[:, valid.columns.str.contains('feature')]
y_valid = np.stack([(valid[c] > 0 ).astype('int')for c in resp_cols] ).T
del train<choose_model_class>
|
learning_rate_reduction=ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )
|
Digit Recognizer
|
5,844,541 |
def create_resnet(num_columns, num_labels, hidden_size, dropout_rate, label_smoothing, learning_rate):
    inp = tf.keras.layers.Input(shape=(num_columns,))
    x = tf.keras.layers.BatchNormalization()(inp)
    x = tf.keras.layers.Dropout(dropout_rate)(x)
    x1 = tf.keras.layers.Dense(hidden_size)(x)
    x1 = tf.keras.layers.BatchNormalization()(x1)
    x1 = tf.keras.layers.LeakyReLU()(x1)
    x1 = tf.keras.layers.Dropout(dropout_rate)(x1)
    x = tf.keras.layers.concatenate([x, x1], axis=1)
    x2 = tf.keras.layers.Dense(hidden_size)(x)
    x2 = tf.keras.layers.BatchNormalization(axis=1)(x2)
    x2 = tf.keras.layers.LeakyReLU()(x2)
    x2 = tf.keras.layers.Dropout(dropout_rate)(x2)
    x = tf.keras.layers.concatenate([x1, x2], axis=1)
    x3 = tf.keras.layers.Dense(hidden_size)(x)
    x3 = tf.keras.layers.BatchNormalization(axis=1)(x3)
    x3 = tf.keras.layers.LeakyReLU()(x3)
    x3 = tf.keras.layers.Dropout(dropout_rate)(x3)
    x = tf.keras.layers.concatenate([x2, x3], axis=1)
    x4 = tf.keras.layers.Dense(hidden_size)(x)
    x4 = tf.keras.layers.BatchNormalization(axis=1)(x4)
    x4 = tf.keras.layers.LeakyReLU()(x4)
    x4 = tf.keras.layers.Dropout(dropout_rate)(x4)
    x = tf.keras.layers.concatenate([x3, x4], axis=1)
    x = tf.keras.layers.Dense(num_labels)(x)
    out = tf.keras.layers.Activation("sigmoid")(x)
    model = tf.keras.models.Model(inputs=inp, outputs=out)
    model.compile(
        optimizer=tfa.optimizers.RectifiedAdam(lr=learning_rate, weight_decay=1e-5),
        loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
        metrics=tf.keras.metrics.AUC(name="AUC"),
    )
    return model
NUM_FOLDS=5
TRAINING=True
if TRAINING:
for i in range(NUM_FOLDS):
tf.keras.backend.clear_session()
SEED = 1111
tf.random.set_seed(SEED+i)
np.random.seed(SEED+i)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
verbose=1,
factor=0.2,
patience=8, mode='min')
earlystop_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath='online_model_{}.weights'.format(i),
save_weights_only=True,
monitor='val_AUC',
mode='max',
verbose=1,
save_best_only=True)
dropout_rate=0.2
hidden_size=256
label_smoothing = 0.005
learning_rate = 1e-3
clf = create_resnet(
len(features), 5, hidden_size, dropout_rate, label_smoothing, learning_rate
)
clf.summary()
clf.fit(X_train, y_train, epochs=200, batch_size=4096,validation_data=(X_valid,y_valid),
callbacks=[reduce_lr,
earlystop_callback,
model_checkpoint_callback])
gc.collect()
<feature_engineering>
|
epochs = 50
batch_size = 128
|
Digit Recognizer
|
5,844,541 |
<import_modules>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train)
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction])
|
Digit Recognizer
|
5,844,541 |
<load_from_csv><EOS>
|
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False )
|
Digit Recognizer
|
4,864,231 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
4,864,231 |
df3 = pd.read_csv('../input/0-286-private-norm/df_rcnn286.csv')<merge>
|
train = pd.read_csv("../input/train.csv")
|
Digit Recognizer
|
4,864,231 |
df4 = pd.merge(df, df3, on = 'image_id', how = 'left' )<feature_engineering>
|
test = pd.read_csv("../input/test.csv")
print(test.info())
test.head()
|
Digit Recognizer
|
4,864,231 |
list1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
for i in range(df4.shape[0]):
    if df4.loc[i, 'PredictionString'] == '14 1 0 0 1 1':
        continue
    a = df4.loc[i, 'PredictionString']
    b = a.split()
    for j in range(int(len(b) / 6)):
        for k in list1:
            if int(b[0 + 6 * j]) == k:
                c = b[0 + 6 * j + 1]
                b[0 + 6 * j + 1] = str(df4.loc[i, f'{k}'] * 0.05 + float(c) * 0.95)
    df4.loc[i, 'PredictionString'] = ' '.join(b)
<save_to_csv>
|
X_train = train.drop(['label'],axis=1 ).astype('float32')
y_train = train['label'].astype('float32')
X_test = test.values.astype('float32' )
|
Digit Recognizer
|
4,864,231 |
df_final = df4[['image_id', 'PredictionString']]
df_final.to_csv('submission.csv',index = False)
<import_modules>
|
def normalize(m):
return m / 255
X_train = normalize(X_train)
X_test = normalize(X_test )
|
Digit Recognizer
|
4,864,231 |
import numpy as np
import pandas as pd
<load_from_csv>
|
print('Labels')
print(y_train[:5])
y_train = to_categorical(y_train, 10)
print('Encoded labels')
print(y_train[:5] )
|
Digit Recognizer
|
4,864,231 |
pred_2class = pd.read_csv("../input/vinbigdata-2class-prediction/2-cls test pred.csv")
low_threshold = 0.0
high_threshold = 0.874
pred_2class<load_from_csv>
|
checkpoint = ModelCheckpoint(filepath='mnist.model.best.hdf5',
verbose=1,
save_best_only=True,
monitor='val_acc')
def build_model() :
model = Sequential([
Convolution2D(16,(3,3), activation='relu', input_shape=(28, 28, 1)) ,
BatchNormalization() ,
Convolution2D(16,(3,3), activation='relu'),
BatchNormalization() ,
MaxPooling2D() ,
Dropout(0.25),
Convolution2D(32,(3,3), activation='relu'),
Convolution2D(32,(3,3), activation='relu'),
BatchNormalization() ,
MaxPooling2D() ,
Dropout(0.25),
Flatten() ,
Dense(256, activation='relu'),
BatchNormalization() ,
Dropout(0.25),
Dense(10, activation='softmax')
])
model.compile(optimizer=Adam() ,
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
model = build_model()
|
Digit Recognizer
|
4,864,231 |
NORMAL = "14 1 0 0 1 1"
pred_det_df = pd.read_csv("../input/pp-old/submission_postprocessed(4).csv")
n_normal_before = len(pred_det_df.query("PredictionString == @NORMAL"))
merged_df = pd.merge(pred_det_df, pred_2class, on="image_id", how="left")
if "target" in merged_df.columns:
merged_df["class0"] = 1 - merged_df["target"]
c0, c1, c2 = 0, 0, 0
for i in range(len(merged_df)):
    p0 = merged_df.loc[i, "class0"]
    if p0 < low_threshold:
        c0 += 1
    elif low_threshold <= p0 < high_threshold:
merged_df.loc[i, "PredictionString"] += f" 14 {p0} 0 0 1 1"
c1 += 1
else:
merged_df.loc[i, "PredictionString"] = NORMAL
c2 += 1
n_normal_after = len(merged_df.query("PredictionString == @NORMAL"))
print(
f"n_normal: {n_normal_before} -> {n_normal_after} with threshold {low_threshold} & {high_threshold}"
)
print(f"Keep {c0} Add {c1} Replace {c2}")
submission_filepath = str("submission.csv")
submission_df = merged_df[["image_id", "PredictionString"]]
submission_df.to_csv(submission_filepath, index=False)
print(f"Saved to {submission_filepath}")
<install_modules>
|
X_train, X_val, y_train, y_val = train_test_split(X_train,
y_train,
test_size=0.1,
random_state=42 )
|
Digit Recognizer
|
4,864,231 |
!pip install -U ensemble-boxes<import_modules>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )
|
Digit Recognizer
|
4,864,231 |
import pandas as pd
import numpy as np
from ensemble_boxes import *
from glob import glob
import copy
from tqdm import tqdm
import shutil<load_from_csv>
|
image_generator = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
|
Digit Recognizer
|
4,864,231 |
height_dict = pd.read_csv('../input/vinbigdata-original-image-dataset/vinbigdata/test.csv').to_dict('records')
fnl_dict = {}
for ix, i in enumerate(height_dict):
    fnl_dict[i['image_id']] = [i['width'], i['height'], i['width'], i['height']]<load_from_csv>
|
batch_size = 96
epochs = 60
steps_per_epoch = X_train.shape[0] / batch_size
batches = image_generator.flow(X_train, y_train, batch_size=batch_size )
|
Digit Recognizer
|
4,864,231 |
subs = [
    pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_1.csv'),
    pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_2.csv'),
    pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_3.csv'),
    pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_4.csv'),
    pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_5.csv'),
    pd.read_csv('../input/vinbigdata-cxr-ad-yolov5-14-class-infer-184dd1/submission.csv')
]
pred_2cls = pd.read_csv('../input/vinbigdata-2class-prediction/2-cls test pred.csv')<categorify>
|
history = model.fit_generator(generator=batches,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=(X_val, y_val),
callbacks=[checkpoint, learning_rate_reduction] )
|
Digit Recognizer
|
4,864,231 |
def submission_decoder(df: pd.DataFrame) -> pd.DataFrame:
    info = df.values
    df_lst = []
    for i in info:
        pre_lst = i[1].split(' ')
        for j in range(0, len(pre_lst), 6):
            df_lst.append([i[0], int(pre_lst[j]), float(pre_lst[j + 1]), int(pre_lst[j + 2]),
                           int(pre_lst[j + 3]), int(pre_lst[j + 4]), int(pre_lst[j + 5]),
                           fnl_dict.get(i[0])[0], fnl_dict.get(i[0])[1]])
    return pd.DataFrame(df_lst, columns=['image_id', 'class_id', 'score', 'x_min', 'y_min', 'x_max', 'y_max', 'width', 'height'])<categorify>
|
model.load_weights('mnist.model.best.hdf5' )
|
Digit Recognizer
|
4,864,231 |
subs = [submission_decoder(subs[i]) for i in range(len(subs))]<count_unique_values>
|
_, train_acc = model.evaluate(X_train, y_train, verbose=0)
_, test_acc = model.evaluate(X_val, y_val, verbose=0)
print('Train accuracy: %.3f, Test accuracy: %.3f' %(train_acc, test_acc))
|
Digit Recognizer
|
4,864,231 |
boxes_dict = {}
scores_dict = {}
labels_dict = {}
whwh_dict = {}
for i in tqdm(subs[0].image_id.unique()):
    if i not in boxes_dict:
        boxes_dict[i] = []
        scores_dict[i] = []
        labels_dict[i] = []
        whwh_dict[i] = []
    size_ratio = fnl_dict.get(i)
    whwh_dict[i].append(size_ratio)
    tmp_df = [subs[x][subs[x]['image_id'] == i] for x in range(len(subs))]
    for x in range(len(tmp_df)):
        boxes_dict[i].append(((tmp_df[x][['x_min', 'y_min', 'x_max', 'y_max']].values) / size_ratio).tolist())
        scores_dict[i].append(tmp_df[x]['score'].values.tolist())
        labels_dict[i].append(tmp_df[x]['class_id'].values.tolist())<statistical_test>
|
errors =(Y_pred_classes - y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = y_true[errors]
|
Digit Recognizer
|
4,864,231 |
weights = [1]*5
weights += [3]
weights1 = [3,2,4,5]
iou_thr = 0.25
skip_box_thr = 0.0
sigma = 0.1
fnl = {}
for i in tqdm(boxes_dict.keys()):
    boxes, scores, labels = weighted_boxes_fusion(boxes_dict[i], scores_dict[i], labels_dict[i],
                                                  weights=weights, iou_thr=iou_thr, skip_box_thr=skip_box_thr)
    if i not in fnl:
        fnl[i] = {'boxes': [], 'scores': [], 'labels': []}
    fnl[i]['boxes'] = boxes * whwh_dict[i]
    fnl[i]['scores'] = scores
    fnl[i]['labels'] = labels<remove_duplicates>
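weighted_boxes_fusion expects coordinates normalized to [0, 1], which is why the boxes were divided by size_ratio earlier and multiplied back afterwards. A minimal standalone call on made-up boxes (illustrative, not from the kernel):
from ensemble_boxes import weighted_boxes_fusion
# two models voting on roughly the same box, coordinates already in [0, 1]
boxes_list = [[[0.10, 0.10, 0.50, 0.50]], [[0.12, 0.11, 0.52, 0.49]]]
scores_list = [[0.9], [0.8]]
labels_list = [[0], [0]]
boxes, scores, labels = weighted_boxes_fusion(
    boxes_list, scores_list, labels_list, weights=[1, 1],
    iou_thr=0.25, skip_box_thr=0.0)
print(boxes, scores, labels)  # one fused box with a blended score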
|
predictions = model.predict_classes(X_test, verbose=2 )
|
Digit Recognizer
|
4,864,231 |
pd_form = []
for i in fnl.keys():
    b = fnl[i]
    for j in range(len(b['boxes'])):
        pd_form.append([i, int(b['labels'][j]), round(b['scores'][j], 2),
                        int(b['boxes'][j][0]), int(b['boxes'][j][1]),
                        int(b['boxes'][j][2]), int(b['boxes'][j][3])])
final_df = pd.DataFrame(pd_form, columns=['image_id', 'class_id', 'score', 'x_min', 'y_min', 'x_max', 'y_max'])
final_df = final_df.drop_duplicates(keep='first')<categorify>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['Label'] = predictions
sub.to_csv('submission.csv',index=False )
|
Digit Recognizer
|
7,500,544 |
def submission_encoder(df: pd.DataFrame) -> pd.DataFrame:
    dct = {}
    for i in tqdm(df['image_id'].unique()):
        if i not in dct:
            dct[i] = []
        tmp = df[df['image_id'] == i].values
        for j in tmp:
            dct[i].append(int(j[1]))
            dct[i].append(float(j[2]))
            dct[i].append(int(j[3]))
            dct[i].append(int(j[4]))
            dct[i].append(int(j[5]))
            dct[i].append(int(j[6]))
        dct[i] = map(str, dct[i])
        dct[i] = ' '.join(dct[i])
    dct = [[k, v] for k, v in dct.items()]
    return pd.DataFrame(dct, columns=['image_id', 'PredictionString']).reset_index(drop=True)
df = submission_encoder(final_df)
df.to_csv('Fold5Yolo.csv', index=False )<merge>
|
%matplotlib inline
|
Digit Recognizer
|
7,500,544 |
NORMAL = "14 1 0 0 1 1"
low_threshold = 0.00
high_threshold = 0.99
pred_det_df = df
n_normal_before = len(pred_det_df.query("PredictionString == @NORMAL"))
merged_df = pd.merge(pred_det_df, pred_2cls, on="image_id", how="left")
if "target" in merged_df.columns:
merged_df["class0"] = 1 - merged_df["target"]
c0, c1, c2 = 0, 0, 0
for i in range(len(merged_df)):
    p0 = merged_df.loc[i, "class0"]
    if p0 < low_threshold:
        c0 += 1
    elif low_threshold <= p0 < high_threshold:
if ' 14 ' not in merged_df.loc[i, "PredictionString"]:
merged_df.loc[i, "PredictionString"] += f" 14 {p0} 0 0 1 1"
c1 += 1
else:
merged_df.loc[i, "PredictionString"] = NORMAL
c2 += 1
n_normal_after = len(merged_df.query("PredictionString == @NORMAL"))
print(
f"n_normal: {n_normal_before} -> {n_normal_after} with threshold {low_threshold} & {high_threshold}"
)
print(f"Keep {c0} Add {c1} Replace {c2}")
submission_filepath = str("submission.csv")
submission_df = merged_df[["image_id", "PredictionString"]]
submission_df.to_csv(submission_filepath, index=False)
print(f"Saved to {submission_filepath}" )<import_modules>
|
train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submit_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv' )
|
Digit Recognizer
|
7,500,544 |
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import RidgeCV<load_from_csv>
|
num_pixel = len(train_df.columns)- 1
num_pixel
|
Digit Recognizer
|
7,500,544 |
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')<count_missing_values>
|
transform_0 = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])
transform_1 = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomRotation(30),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])
transform_2 = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.8, 0.8)),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])
transform_3 = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomAffine(degrees=30, scale=(1.1, 1.1)),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])
transform_4 = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomAffine(degrees=30, translate=(0.1, 0.1)),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])
transform_5 = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomAffine(degrees=10, shear=45),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])
|
Digit Recognizer
|
7,500,544 |
sum(train.isnull().sum())<count_missing_values>
|
class DataFrame_to_Dataset(Dataset):
    def __init__(self, df, transform=transform_0):
        if len(df.columns) == num_pixel:
            self.features = df.values.reshape((-1, 28, 28)).astype(np.uint8)
            self.labels = None
        else:
            self.features = df.iloc[:, 1:].values.reshape((-1, 28, 28)).astype(np.uint8)
            self.labels = torch.from_numpy(df.label.values)
        self.transform = transform

    def __len__(self):
        return len(self.features)

    def __getitem__(self, index):
        if self.labels is not None:
            return self.transform(self.features[index]), self.labels[index]
        else:
            return self.transform(self.features[index])
|
Digit Recognizer
|
7,500,544 |
sum(test.isnull().sum())<concatenate>
|
def create_dataloaders(seed, test_size=0.1, df=train_df, batch_size=32):
train_data, valid_data = train_test_split(df,
test_size=test_size,
random_state=seed)
train_dataset_0 = DataFrame_to_Dataset(train_data)
train_dataset_1 = DataFrame_to_Dataset(train_data, transform_1)
train_dataset_2 = DataFrame_to_Dataset(train_data, transform_2)
train_dataset_3 = DataFrame_to_Dataset(train_data, transform_3)
train_dataset_4 = DataFrame_to_Dataset(train_data, transform_4)
train_dataset_5 = DataFrame_to_Dataset(train_data, transform_5)
train_dataset = ConcatDataset([train_dataset_0, train_dataset_1, train_dataset_2, train_dataset_3, train_dataset_4, train_dataset_5])
valid_dataset = DataFrame_to_Dataset(valid_data)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
return train_loader, valid_loader
|
Digit Recognizer
|
7,500,544 |
house_data = pd.concat([train.iloc[:,:-1], test],axis=0)
house_data = house_data.drop(columns=['Id'], axis=1)
sep = len(train )<data_type_conversions>
|
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=3),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=3),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=5, stride=2, padding=14),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(0.4)
)
self.conv2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=6),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Dropout2d(0.3)
)
self.conv3 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=4),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Dropout2d(0.2)
)
self.fc = nn.Sequential(
nn.Linear(128*1*1, 10)
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x.view(-1, 128*1*1)
x = self.fc(x)
return x
|
Digit Recognizer
|
7,500,544 |
n_columns = [name for name in house_data.columns if house_data[name].dtype in ['int64', 'float64']]
w_columns = [name for name in house_data.columns if house_data[name].dtype == "object"]
values = {}
for a in w_columns:
values[a] = 'UNKNOWN'
for a in n_columns:
values[a] = house_data[a].median()
house_data.fillna(value=values,inplace=True)
house_data.head()<drop_column>
|
use_cuda = torch.cuda.is_available()
print(use_cuda )
|
Digit Recognizer
|
7,500,544 |
house_data = house_data.drop(['PoolQC'], axis=1 )<categorify>
|
def train(seed, num_epochs):
print('Creating new dataloaders...')
train_loader, valid_loader = create_dataloaders(seed=seed)
print('Creating a new model...')
net = Net()
criterion = nn.CrossEntropyLoss()
if use_cuda:
net.cuda()
criterion.cuda()
optimizer = optim.Adam(net.parameters() ,
lr=0.003, betas=(0.9, 0.999),
eps=1e-08, weight_decay=0,
amsgrad=False)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
print('Training the model...')
for epoch in range(num_epochs):
net.train()
t0 = time.time()
training_loss = 0.0
num_samples = 0
for features, labels in train_loader:
if use_cuda:
features = features.cuda()
labels = labels.cuda()
optimizer.zero_grad()
outputs = net(features)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
training_loss += loss.item()
num_samples += len(features)
net.eval()
correct = 0
total = 0
with torch.no_grad() :
for valid_features, valid_labels in valid_loader:
if use_cuda:
valid_features = valid_features.cuda()
valid_labels = valid_labels.cuda()
outputs = net(valid_features)
_, predicted = torch.max(outputs, 1)
total += valid_labels.size(0)
correct +=(predicted == valid_labels ).sum().item()
scheduler.step()
print('[model %d, epoch %d, time: %.3f seconds] train_loss: %.5f, val_acc: %4f %%' %
(seed + 1, epoch + 1, time.time() - t0, training_loss/num_samples, 100 * correct / total))
net.eval()
test_pred = torch.LongTensor()
if use_cuda:
test_pred = test_pred.cuda()
with torch.no_grad() :
for features in test_loader:
if use_cuda:
features = features.cuda()
outputs = net(features)
_, predicted = torch.max(outputs, 1)
test_pred = torch.cat(( test_pred, predicted), dim=0)
model_name = 'model_' + str(seed + 1)
ensemble_df[model_name] = test_pred.cpu().numpy()
print('Prediction Saved!\n')
|
Digit Recognizer
|
7,500,544 |
house_data = pd.get_dummies(house_data )<prepare_x_and_y>
|
test_dataset = DataFrame_to_Dataset(test_df)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
ensemble_df = submit_df.copy()
num_models = 23
num_epochs = 6
for seed in range(num_models):
train(seed, num_epochs )
|
Digit Recognizer
|
7,500,544 |
y_train = train.iloc[:,-1]
X_train = house_data.iloc[:sep, :]
X_test = house_data.iloc[sep:, :]<train_model>
|
final_pred = ensemble_df.iloc[:,2:].mode(axis=1 ).iloc[:,0]
submit_df.Label = final_pred.astype(int)
submit_df.head()
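mode(axis=1) above implements a per-image majority vote over the model prediction columns; a toy illustration (not from the notebook):
import pandas as pd
votes = pd.DataFrame({'m1': [1, 7], 'm2': [1, 7], 'm3': [2, 3]})
print(votes.mode(axis=1).iloc[:, 0])  # majority label per row: 1 and 7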
|
Digit Recognizer
|
7,500,544 |
search_rf = RandomForestRegressor(n_estimators=100, random_state=0)
search_rf.fit(X_train, y_train)
y_test_rf = search_rf.predict(X_test)
print('Random forest train R^2:', search_rf.score(X_train, y_train))<save_to_csv>
|
submit_df.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
5,742,147 |
output1 = pd.DataFrame({'Id': test.Id.values, 'SalePrice': y_test_rf})
output1.to_csv('output1.csv', index=False )<find_best_params>
|
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
train_labels = train_data["label"]
train_data = train_data.drop(['label'], axis = 1)
train_data = train_data /255.
test_data = test_data / 255.
train_data = train_data.values.reshape(train_data.shape[0],28,28,1)
test_data = test_data.values.reshape(test_data.shape[0],28,28,1)
labels = to_categorical(train_labels)
X_train, X_test, y_train, y_test = train_test_split(train_data, labels, test_size = 0.2, random_state = 42 )
|
Digit Recognizer
|
5,742,147 |
search_cv = RidgeCV(alphas=(0.001, 0.01, 0.05, 0.1, 0.3, 0.5, 1, 3, 5, 10))
search_cv.fit(X_train, y_train)
y_test_cv = search_cv.predict(X_test)
print('RidgeCV train R^2:', search_cv.score(X_train, y_train))<save_to_csv>
|
learning_rate_cb = ReduceLROnPlateau(monitor = 'val_acc',
patience = 2,
verbose = 1,
factor = 0.5,
min_lr = 1e-5 )
|
Digit Recognizer
|
5,742,147 |
output2 = pd.DataFrame({'Id': test.Id.values, 'SalePrice': y_test_cv})
output2.to_csv('output2.csv', index=False )<save_to_csv>
|
datagen = ImageDataGenerator(
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1,
rotation_range = 10
)
datagen.fit(X_train )
|
Digit Recognizer
|
5,742,147 |
prediction =(y_test_rf + y_test_cv)/2
output = pd.DataFrame({'Id': test.Id.values, 'SalePrice': prediction})
output.to_csv('submission.csv', index=False )<import_modules>
|
batchsize = 512
num_epochs = 30
n_model_runs = 10
modellist = list()
for i in range(n_model_runs):
print("+++++++++ running model number", i+1)
model = models.Sequential([
Conv2D(16, [5,5], activation = 'relu', padding = 'same', input_shape = [28,28,1]),
MaxPooling2D([2,2]),
Conv2D(32, [5,5], activation = 'relu', padding = 'same'),
MaxPooling2D([2,2]),
Conv2D(64, [3,3], activation = 'relu', padding = 'same'),
MaxPooling2D([2,2]),
Conv2D(64, [3,3], activation = 'relu', padding = 'same'),
MaxPooling2D([2,2]),
Flatten() ,
Dense(512, activation = 'relu'),
Dropout(0.3),
Dense(1024, activation = 'relu'),
Dropout(0.5),
Dense(10, activation = 'softmax')
])
model.compile(optimizer = Adam(lr=1e-3),
loss='categorical_crossentropy',
metrics = ['accuracy'])
model.fit_generator(datagen.flow(X_train, y_train, batchsize),
epochs = num_epochs,
steps_per_epoch = X_train.shape[0]/batchsize,
validation_data =(X_test, y_test),
callbacks=[learning_rate_cb],
verbose = 1)
modellist.append(model )
|
Digit Recognizer
|
5,742,147 |
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import mutual_info_regression
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder, StandardScaler
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.ensemble import RandomForestRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
import optuna<set_options>
|
prediction = [model.predict(test_data) for model in modellist]
prediction = np.sum(prediction, axis=0)
prediction = np.argmax(prediction, axis=1)
|
Digit Recognizer
|
5,742,147 |
<drop_column><EOS>
|
submission = pd.DataFrame({"ImageId": list(range(1, len(prediction)+1)) , "Label": prediction})
submission.to_csv('submission.csv', index = False, header = True )
|
Digit Recognizer
|
1,186,370 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
|
config = tf.ConfigProto(device_count = {'GPU': 1 , 'CPU': 4})
sess = tf.Session(config=config)
keras.backend.set_session(sess)
K.set_image_dim_ordering('tf' )
|
Digit Recognizer
|
1,186,370 |
def load_data():
    train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
    X_train = train.copy()
    y_train = X_train['SalePrice']
    test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
    X_test = test.copy()
    X_train = clean(X_train)
    X_test = clean(X_test)
all_features = ['MSSubClass','MSZoning','LotFrontage','LotArea','Street','Alley','LotShape','LandContour','Utilities','LotConfig','LandSlope','Neighborhood','Condition1','Condition2','BldgType','HouseStyle','OverallQual','OverallCond','YearBuilt','YearRemodAdd','RoofStyle','RoofMatl','Exterior1st','Exterior2nd','MasVnrType','MasVnrArea','ExterQual','ExterCond','Foundation','BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinSF1','BsmtFinType2','BsmtFinSF2','BsmtUnfSF','TotalBsmtSF','Heating','HeatingQC','CentralAir','Electrical','1stFlrSF','2ndFlrSF','LowQualFinSF','GrLivArea','BsmtFullBath','BsmtHalfBath','FullBath','HalfBath','BedroomAbvGr','KitchenAbvGr','KitchenQual','TotRmsAbvGrd','Functional','Fireplaces','FireplaceQu','GarageType','GarageYrBlt','GarageFinish','GarageCars','GarageArea','GarageQual','GarageCond','PavedDrive','WoodDeckSF','OpenPorchSF','EnclosedPorch','3SsnPorch','ScreenPorch','PoolArea','PoolQC','Fence','MoSold','YrSold','SaleType','SaleCondition','SalePrice','MiscFeatureShed','MiscFeatureGar2','MiscFeatureOthr']
num_features = ['BsmtFullBath','BsmtHalfBath','FullBath','HalfBath','BedroomAbvGr','KitchenAbvGr','TotRmsAbvGrd','Fireplaces','GarageCars','LotFrontage','LotArea','MasVnrArea','BsmtFinSF1','BsmtFinSF2','BsmtUnfSF','TotalBsmtSF','1stFlrSF','2ndFlrSF','LowQualFinSF','GrLivArea','GarageArea','WoodDeckSF','OpenPorchSF','EnclosedPorch','3SsnPorch','ScreenPorch','PoolArea','MiscFeatureGar2','MiscFeatureOthr','MiscFeatureShed','MoSold','YearBuilt','YearRemodAdd','YrSold','GarageYrBlt']
num_continuous_features = ['LotFrontage','LotArea','MasVnrArea','BsmtFinSF1','BsmtFinSF2','BsmtUnfSF','TotalBsmtSF','1stFlrSF','2ndFlrSF','LowQualFinSF','GrLivArea', 'GarageArea','WoodDeckSF','OpenPorchSF','EnclosedPorch','3SsnPorch','ScreenPorch','PoolArea','MiscFeatureGar2','MiscFeatureOthr','MiscFeatureShed','MoSold','YearBuilt','YearRemodAdd','YrSold','GarageYrBlt']
num_discrete_features=['BsmtFullBath','BsmtHalfBath','FullBath','HalfBath','BedroomAbvGr','KitchenAbvGr','TotRmsAbvGrd','Fireplaces','GarageCars']
cat_features = ['LotShape','LandContour','OverallQual','OverallCond','Utilities','LandSlope','ExterQual','ExterCond','BsmtQual','BsmtCond','BsmtExposure','HeatingQC','KitchenQual','FireplaceQu','GarageFinish','GarageQual','GarageCond','PoolQC','Fence','MSSubClass','MSZoning','Street','Alley','LotConfig','Neighborhood','Condition1','Condition2','BldgType','HouseStyle','RoofStyle','RoofMatl','Exterior1st','Exterior2nd','MasVnrType','Foundation','BsmtFinType1','BsmtFinType2','Heating','CentralAir','Electrical','Functional','GarageType','PavedDrive','SaleType','SaleCondition']
cat_features_to_encode = cat_features.copy()
cat_features_to_encode.remove('OverallQual')
cat_features_to_encode.remove('OverallCond')
return train, test, X_train, y_train, X_test, all_features, num_features, num_continuous_features, num_discrete_features, cat_features, cat_features_to_encode<train_on_grid>
|
train_data = np.genfromtxt('../input/train.csv', delimiter=',')[1:]
train_X = train_data[:, 1:]
train_y_orig = train_data[:, :1]
train_y = np.zeros([train_y_orig.shape[0], 10])
for ind in range(train_y_orig.shape[0]):
train_y[ind][int(train_y_orig[ind][0])] = 1
|
Digit Recognizer
|
1,186,370 |
def cv_loop(
X_train,
y_train,
X_test,
model,
useful_features,
num_features,
cat_features,
cat_features_to_encode,
encoding = 'ohe',
new_features=[],
scaling=False,
clip=False, clipmin=np.log(34900), clipmax=np.log(755000),
tuning=True,
early_stopping=True
):
y_train = np.log(y_train)
num_features = np.intersect1d(num_features, useful_features)
cat_features = np.intersect1d(cat_features, useful_features)
cat_features_to_encode = np.intersect1d(cat_features_to_encode, useful_features)
cum_rmse_val = 0
iteration = 1
N_SPLITS = 5
kf = KFold(n_splits=N_SPLITS, shuffle=True, random_state=42)
for train_index, val_index in kf.split(X_train, y_train):
X_train_, X_val_ = X_train.iloc[train_index], X_train.iloc[val_index]
y_train_, y_val_ = y_train[train_index], y_train[val_index]
X_train__ = X_train_.reset_index(drop=True)
X_val__ = X_val_.reset_index(drop=True)
X_train__ = X_train__.loc[:, useful_features]
X_val__ = X_val__.loc[:, useful_features]
if(tuning==False):
useful_features_ = useful_features.copy()
useful_features_.remove('SalePrice')
X_test__ = X_test.loc[:, useful_features_]
if(new_features.count('NbNAs')==1):
X_train__['NbNAs'] = X_train__.isnull().sum(axis=1)
X_val__['NbNAs'] = X_val__.isnull().sum(axis=1)
if(tuning==False): X_test__['NbNAs'] = X_test__.isnull().sum(axis=1)
for col in num_features:
X_train__[col] = X_train__[col].fillna(0)
X_val__[col] = X_val__[col].fillna(0)
if(tuning==False): X_test__[col] = X_test__[col].fillna(0)
for col in cat_features:
X_train__[col] = X_train__[col].fillna("None")
X_val__[col] = X_val__[col].fillna("None")
if(tuning==False): X_test__[col] = X_test__[col].fillna("None")
if(new_features.count('LivLotRatio')==1):
X_train__['LivLotRatio'] = X_train__['GrLivArea'] / X_train__['LotArea']
X_val__['LivLotRatio'] = X_val__['GrLivArea'] / X_val__['LotArea']
if(tuning==False): X_test__['LivLotRatio'] = X_test__['GrLivArea'] / X_test__['LotArea']
if(new_features.count('Spaciousness')==1):
X_train__['Spaciousness'] = X_train__['GrLivArea'] / X_train__['TotRmsAbvGrd']
X_val__['Spaciousness'] = X_val__['GrLivArea'] / X_val__['TotRmsAbvGrd']
if(tuning==False): X_test__['Spaciousness'] = X_test__['GrLivArea'] / X_test__['TotRmsAbvGrd']
if(new_features.count('MedNhbdArea')==1):
feat = X_train__.groupby('Neighborhood')['GrLivArea'].median()
feat = feat.to_dict()
X_train__.loc[:,'MedNhbdArea'] = X_train__['Neighborhood'].map(feat)
X_val__.loc[:,'MedNhbdArea'] = X_val__['Neighborhood'].map(feat)
if(tuning==False): X_test__.loc[:,'MedNhbdArea'] = X_test__['Neighborhood'].map(feat)
if(new_features.count('GrLivAreaInNbhd')==1):
feat = X_train__.groupby('Neighborhood')['GrLivArea'].median()
feat = feat.to_dict()
X_train__.loc[:,'GrLivAreaInNbhd'] = X_train__['Neighborhood'].map(feat)
X_train__['GrLivAreaInNbhd'] = X_train__['GrLivArea'] - X_train__['GrLivAreaInNbhd']
X_val__.loc[:,'GrLivAreaInNbhd'] = X_val__['Neighborhood'].map(feat)
X_val__['GrLivAreaInNbhd'] = X_val__['GrLivArea'] - X_val__['GrLivAreaInNbhd']
if(tuning==False):
X_test__.loc[:,'GrLivAreaInNbhd'] = X_test__['Neighborhood'].map(feat)
X_test__['GrLivAreaInNbhd'] = X_test__['GrLivArea'] - X_test__['GrLivAreaInNbhd']
if(new_features.count('MedNhbdArea_Ext')==1):
feat = X_train__.groupby('Neighborhood')['LotArea'].median()
feat = feat.to_dict()
X_train__.loc[:,'MedNhbdArea_Ext'] = X_train__['Neighborhood'].map(feat)
X_val__.loc[:,'MedNhbdArea_Ext'] = X_val__['Neighborhood'].map(feat)
if(tuning==False): X_test__.loc[:,'MedNhbdArea_Ext'] = X_test__['Neighborhood'].map(feat)
if(new_features.count('LotAreaInNbhd')==1):
feat = X_train__.groupby('Neighborhood')['LotArea'].median()
feat = feat.to_dict()
X_train__.loc[:,'LotAreaInNbhd'] = X_train__['Neighborhood'].map(feat)
X_train__['LotAreaInNbhd'] = X_train__['LotArea'] - X_train__['LotAreaInNbhd']
X_val__.loc[:,'LotAreaInNbhd'] = X_val__['Neighborhood'].map(feat)
X_val__['LotAreaInNbhd'] = X_val__['LotArea'] - X_val__['LotAreaInNbhd']
if(tuning==False):
X_test__.loc[:,'LotAreaInNbhd'] = X_test__['Neighborhood'].map(feat)
X_test__['LotAreaInNbhd'] = X_test__['LotArea'] - X_test__['LotAreaInNbhd']
if(new_features.count('OverallQualCondProduct')==1):
X_train__['OverallQualCondProduct'] = X_train__['OverallQual'] * X_train__['OverallCond']
X_val__['OverallQualCondProduct'] = X_val__['OverallQual'] * X_val__['OverallCond']
if(tuning==False): X_test__['OverallQualCondProduct'] = X_test__['OverallQual'] * X_test__['OverallCond']
if(new_features.count('LowQualFinRatio')==1):
X_train__['LowQualFinRatio'] = X_train__['LowQualFinSF'] / X_train__['GrLivArea']
X_val__['LowQualFinRatio'] = X_val__['LowQualFinSF'] / X_val__['GrLivArea']
if(tuning==False): X_test__['LowQualFinRatio'] = X_test__['LowQualFinSF'] / X_test__['GrLivArea']
if(encoding=='ohe'):
enc = OneHotEncoder(handle_unknown='ignore')
X_train__enc = pd.DataFrame(enc.fit_transform(X_train__[cat_features_to_encode]).toarray())
X_val__enc = pd.DataFrame(enc.transform(X_val__[cat_features_to_encode]).toarray())
X_train__enc.columns = enc.get_feature_names(cat_features_to_encode)
X_val__enc.columns = enc.get_feature_names(cat_features_to_encode)
X_train__ = X_train__.join(X_train__enc)
X_val__ = X_val__.join(X_val__enc)
if(tuning==False):
X_test__enc = pd.DataFrame(enc.transform(X_test__[cat_features_to_encode]).toarray())
X_test__enc.columns = enc.get_feature_names(cat_features_to_encode)
X_test__ = X_test__.join(X_test__enc)
elif(encoding=='ord'):
enc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)  # avoid transform errors on categories absent from the training fold
X_train__enc = pd.DataFrame(enc.fit_transform(X_train__[cat_features_to_encode]))
X_val__enc = pd.DataFrame(enc.transform(X_val__[cat_features_to_encode]))
X_train__enc.columns = cat_features_to_encode
X_val__enc.columns = cat_features_to_encode
X_train__ = X_train__.join(X_train__enc, rsuffix='_ord_enc')
X_val__ = X_val__.join(X_val__enc, rsuffix='_ord_enc')
if(tuning==False):
X_test__enc = pd.DataFrame(enc.transform(X_test__[cat_features_to_encode]))
X_test__enc.columns = cat_features_to_encode
X_test__ = X_test__.join(X_test__enc, rsuffix='_ord_enc')
elif(encoding=='tar_enc'):
for f in cat_features_to_encode:
feat = X_train__.groupby(f)['SalePrice'].mean()
feat = feat.to_dict()
X_train__.loc[:,f"tar_enc_{f}"] = X_train__[f].map(feat)
X_val__.loc[:,f"tar_enc_{f}"] = X_val__[f].map(feat)
if(tuning==False): X_test__.loc[:,f"tar_enc_{f}"] = X_test__[f].map(feat)
X_train__.drop(columns='SalePrice', inplace=True)
X_train__.drop(columns=cat_features_to_encode, inplace=True)
X_val__.drop(columns='SalePrice', inplace=True)
X_val__.drop(columns=cat_features_to_encode, inplace=True)
if(tuning==False): X_test__.drop(columns=cat_features_to_encode, inplace=True)
X_train__.to_csv(f'X_train__{iteration}.csv', index = False)
X_val__.to_csv(f'X_val__{iteration}.csv', index = False)
if(tuning==False): X_test__.to_csv(f'X_test__{iteration}.csv', index = False)
if scaling:
scaler = StandardScaler()
X_train__ = pd.DataFrame(scaler.fit_transform(X_train__), columns=X_train__.columns)
X_val__ = pd.DataFrame(scaler.transform(X_val__), columns=X_val__.columns)
if(tuning==False): X_test__ = pd.DataFrame(scaler.transform(X_test__), columns=X_test__.columns)
X_train__.to_csv(f'X_train__{iteration}__scaled.csv', index = False)
X_val__.to_csv(f'X_val__{iteration}__scaled.csv', index = False)
if(tuning==False): X_test__.to_csv(f'X_test__{iteration}__scaled.csv', index = False)
if early_stopping:
model.fit(X_train__, y_train_, eval_set=[(X_val__, y_val_)], early_stopping_rounds=100, verbose=False)
else:
model.fit(X_train__, y_train_)
y_val_preds = model.predict(X_val__)
if(clip): y_val_preds = np.clip(y_val_preds,clipmin,clipmax)
if(tuning==False):
if iteration==1:
oof_preds = pd.Series(data=y_val_preds,index=val_index)
else:
oof_preds = pd.concat([oof_preds, pd.Series(data=y_val_preds,index=val_index)])
rmse_val = mean_squared_error(y_val_, y_val_preds, squared=False)
print(str(iteration) + '/' + str(N_SPLITS) + ' KFold RMSLE: ' + str(rmse_val))
cum_rmse_val = cum_rmse_val + rmse_val
if(tuning==False):
new_preds = model.predict(X_test__)
if(clip): new_preds = np.clip(new_preds,clipmin,clipmax)
if iteration==1:
preds = new_preds
else:
preds = preds + new_preds
iteration = iteration + 1
if(tuning==False): preds = preds/N_SPLITS
avg_rmse = cum_rmse_val/N_SPLITS
print('Average RMSLE: ' + str(avg_rmse))
if tuning:
return avg_rmse, None, None
else:
return avg_rmse, np.exp(oof_preds.sort_index()), np.exp(preds)<split>
|
model = Sequential()
input_shape =(28, 28, 1)
model.add(Conv2D(32, kernel_size=(7, 7), padding='same', activation='relu', input_shape=input_shape))
model.add(Conv2D(32, kernel_size=(7, 7), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size=(5, 5), strides=(2,2), padding='same', activation='relu'))
model.add(Conv2D(64, kernel_size=(5, 5), strides=(2,2), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size=(3, 3), strides=(3,3), padding='same', activation='relu'))
model.add(Conv2D(128, kernel_size=(3, 3), strides=(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(10, activation='softmax'))
opt = RMSprop()
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
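# Hedged sanity check (an addition, not part of the original kernel): print the
# per-layer output shapes to confirm the stride/padding choices above reduce the
# 28x28 input to 1x1x128 before Flatten.
model.summary()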
|
Digit Recognizer
|
1,186,370 |
_, _, X_train, y_train, X_test, all_features, num_features, num_continuous_features, num_discrete_features, cat_features, cat_features_to_encode = load_data()
X_train.head(2)<create_dataframe>
|
batch_size=2**8
epochs = 100
n = train_X.shape[0]
train_X = train_X.reshape((n, 28, 28, 1)).astype('float32') / 255
history = model.fit(train_X, train_y,
epochs=epochs,
batch_size=batch_size)
|
Digit Recognizer
|
1,186,370 |
feature_types = pd.DataFrame(data=[num_features, cat_features])
feature_types.index=['Numerical','Categorical']
feature_types.style.set_table_styles([
{'selector': 'thead', 'props': [('display', 'none')]}
])<concatenate>
|
print(model.evaluate(train_X, train_y))
|
Digit Recognizer
|
1,186,370 |
f_with_na_train = X_train.isna().sum(axis=0)
f_with_na_train = f_with_na_train[f_with_na_train>0]
f_with_na_train.name='Nb of NaNs in train'
f_with_na_test = X_test.isna().sum(axis=0)
f_with_na_test = f_with_na_test[f_with_na_test>0]
f_with_na_test.name='Nb of NaNs in test'
f_with_na = pd.concat([f_with_na_train, f_with_na_test], axis=1)
f_with_na.fillna(0, inplace=True)
f_with_na = f_with_na[['Nb of NaNs in train','Nb of NaNs in test']]
f_with_na.sort_values(['Nb of NaNs in train', 'Nb of NaNs in test'], ascending=False)[['Nb of NaNs in train', 'Nb of NaNs in test']].plot(kind='bar', figsize=(20,4))
plt.show()<define_variables>
|
test_data = np.genfromtxt('../input/test.csv', delimiter=',')[1:]
|
Digit Recognizer
|
1,186,370 |
_, _, X_train, y_train, X_test, all_features, num_features, _, _, cat_features, cat_features_to_encode = load_data()
useful_features = [e for e in all_features if e not in ('PoolArea','3SsnPorch','MoSold','YrSold','RoofMatl','Utilities','MiscFeatureGar2','PoolQC')]
new_features = ['NbNAs','LivLotRatio','Spaciousness','MedNhbdArea','GrLivAreaInNbhd','MedNhbdArea_Ext','LotAreaInNbhd','OverallQualCondProduct','LowQualFinRatio']
encoding = 'tar_enc'
scaling = True
clip = False
early_stopping = True<define_search_space>
|
predictions = model.predict(test_data.reshape((test_data.shape[0], 28, 28, 1)).astype('float32') / 255)
predictions = predictions.argmax(1)
|
Digit Recognizer
|
1,186,370 |
<find_best_params><EOS>
|
sub_data = np.zeros([predictions.shape[0], 2])
count = 0
for val in predictions:
sub_data[count] = [count + 1, val]
count += 1
sub_data = sub_data.astype(int)
np.savetxt(fname="submission.csv",
X=sub_data,
fmt='%i',
delimiter=',',
comments='',
header='ImageId,Label')
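# Hedged alternative sketch (not the author's code): the same submission file
# built with pandas instead of the manual counter loop above.
import pandas as pd
pd.DataFrame({'ImageId': np.arange(1, predictions.shape[0] + 1),
              'Label': predictions}).to_csv('submission.csv', index=False)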
|
Digit Recognizer
|
4,566,279 |
<SOS> metric: categorization accuracy. Kaggle data source: digit-recognizer<choose_model_class>
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.datasets import mnist
from keras.models import Model, Sequential
from keras.models import load_model
from keras.layers import Input, Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import UpSampling2D, Dropout, Dense, Flatten
from keras.callbacks import TensorBoard
|
Digit Recognizer
|
4,566,279 |
if TUNING_XGB:
model = XGBRegressor(
tree_method='gpu_hist',
predictor='gpu_predictor',
n_jobs=4,
**trial.params)
else:
params = {
'max_depth': 5,
'n_estimators': 7779,
'eta': 0.0044144556312306175,
'subsample': 0.30000000000000004,
'colsample_bytree': 0.2,
'colsample_bylevel': 0.4,
'min_child_weight': 0.21792841014662054,
'reg_lambda': 5.06808562586094,
'reg_alpha': 0.036826697275635915,
'gamma': 0.002452743312016066,
}
model = XGBRegressor(
tree_method='gpu_hist',
predictor='gpu_predictor',
n_jobs=4,
**params)
avg_rmse, oof_preds, preds = cv_loop(
X_train=X_train,
y_train=y_train,
X_test=X_test,
model=model,
useful_features=all_features,
num_features=num_features,
cat_features=cat_features,
cat_features_to_encode=cat_features_to_encode,
encoding = encoding,
new_features=new_features,
scaling = scaling,
clip = clip, clipmin=np.log(34900), clipmax=np.log(755000),
tuning=False,
early_stopping = early_stopping
)<save_to_csv>
|
train_dir = ".. /input/train.csv"
test_dir = ".. /input/test.csv"
|
Digit Recognizer
|
4,566,279 |
submission = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
submission['SalePrice'] = preds
submission.to_csv('xgb_preds.csv', index=False)
oof_preds.to_csv('xgb_oof_preds.csv', header=False)<categorify>
|
df_train = pd.read_csv(train_dir)
df_train.info()
|
Digit Recognizer
|
4,566,279 |
_, _, X_train, y_train, X_test, all_features, num_features, _, _, cat_features, cat_features_to_encode = load_data()
useful_features = [e for e in all_features if e not in ('PoolArea','3SsnPorch','MoSold','YrSold','RoofMatl','Utilities','MiscFeatureGar2','PoolQC')]
new_features = ['NbNAs','LivLotRatio','Spaciousness','MedNhbdArea','GrLivAreaInNbhd','MedNhbdArea_Ext','LotAreaInNbhd','OverallQualCondProduct','LowQualFinRatio']
encoding = 'ohe'
scaling = True
clip = True
early_stopping = False<compute_train_metric>
|
y_train = df_train['label']
X_train = df_train.drop(columns=['label'])
|
Digit Recognizer
|
4,566,279 |
def objective(trial):
param_grid = {
'alpha': trial.suggest_loguniform('alpha', 0.0001, 10000),
'max_iter': trial.suggest_int('max_iter', 1000, 900000, log=True),  # Lasso's max_iter must be an integer; suggest_loguniform would pass a float
'random_state': 42
}
model = Lasso(
**param_grid
)
avg_rmse, _, _ = cv_loop(
X_train = X_train,
y_train = y_train,
X_test = X_test,
model = model,
useful_features = useful_features,
num_features = num_features,
cat_features = cat_features,
cat_features_to_encode = cat_features_to_encode,
encoding = encoding,
new_features = new_features,
scaling = scaling,
clip = clip, clipmin=np.log(34900), clipmax=np.log(755000),
tuning = True,
early_stopping = early_stopping
)
return avg_rmse<find_best_params>
|
display_image(X_train, y_train, n=10, label=True)
|
Digit Recognizer
|
4,566,279 |
if TUNING_LASSO:
study = optuna.create_study(direction='minimize', study_name=STUDY_NAME)
study.optimize(objective, n_trials=50)
print('Number of finished trials: ', len(study.trials))
print('Best trial:')
trial = study.best_trial
print('\tValue: {}'.format(trial.value))
print('\tParams: ')
for key, value in trial.params.items():
print('\t\t{}: {}'.format(key, value))<train_on_grid>
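# Hedged sketch (addition): when the study above has been run, Optuna's built-in
# history plot shows how the objective converged. Assumes a plotly backend is
# available, which optuna.visualization requires.
if TUNING_LASSO:
    optuna.visualization.plot_optimization_history(study).show()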
|
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, shuffle=False)
|
Digit Recognizer
|
4,566,279 |
if TUNING_LASSO:
model = Lasso(**trial.params, random_state=42)
else:
params = {
'alpha': 0.0018185000964940012,
'max_iter': 21098,
'random_state' : 42
}
model = Lasso(**params)
avg_rmse, oof_preds, preds = cv_loop(
X_train = X_train,
y_train = y_train,
X_test = X_test,
model = model,
useful_features = useful_features,
num_features = num_features,
cat_features = cat_features,
cat_features_to_encode = cat_features_to_encode,
encoding = encoding,
new_features = new_features,
scaling = scaling,
clip = clip, clipmin = np.log(34900), clipmax = np.log(755000),
tuning = False,
early_stopping = early_stopping
)<save_to_csv>
|
X_train = X_train.values.reshape(-1, 28, 28, 1)
X_val = X_val.values.reshape(-1, 28, 28, 1)
|
Digit Recognizer
|
4,566,279 |
submission = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
submission['SalePrice'] = preds
submission.to_csv('lasso_preds.csv', index=False)
oof_preds.to_csv('lasso_oof_preds.csv', header=False)<load_from_csv>
|
X_train = X_train / 255.0
X_val = X_val / 255.0
|
Digit Recognizer
|
4,566,279 |
X_train, y_train = oof_preds[['xgb_oof_preds','lasso_oof_preds']], oof_preds['y_train']
xgb_preds = pd.read_csv('xgb_preds.csv').iloc[:,1]
lasso_preds = pd.read_csv('lasso_preds.csv').iloc[:,1]
X_test = pd.concat([np.log(xgb_preds), np.log(lasso_preds)], axis=1)
# align test feature names with the OOF training columns before predicting
X_test.columns = ['xgb_oof_preds', 'lasso_oof_preds']<find_best_model_class>
|
Y_train = pd.get_dummies(y_train).values
Y_val = pd.get_dummies(y_val).values
|
Digit Recognizer
|
4,566,279 |
metamodel = LinearRegression()
cum_rmse_val = 0
iteration = 1
kf = KFold(n_splits=5, shuffle=True, random_state=42)
for train_index, val_index in kf.split(X_train, y_train):
X_train_, X_val_ = X_train.iloc[train_index], X_train.iloc[val_index]
y_train_, y_val_ = y_train[train_index], y_train[val_index]
metamodel.fit(X_train_, y_train_)
y_val_preds = metamodel.predict(X_val_)
rmse_val = mean_squared_error(y_val_, y_val_preds, squared=False)
print(rmse_val)
cum_rmse_val = cum_rmse_val + rmse_val
new_preds=metamodel.predict(X_test)
if(iteration==1):
preds = new_preds
else:
preds = preds + new_preds
iteration = iteration+1
print(cum_rmse_val/5)
preds = preds / 5<save_to_csv>
|
print("La valeur {} est encodée vers le vecteur {}".format(y_train[0], Y_train[0]))
print("valeur {} transformée en vecteur: {}".format(y_train[20], Y_train[20]))
|
Digit Recognizer
|
4,566,279 |
submission = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
submission['SalePrice'] = np.exp(preds)
submission.to_csv('stacking_preds.csv', index=False)<import_modules>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=20,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
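# Hedged sketch (addition): preview one augmented batch to eyeball the
# rotation/shift/zoom settings above; assumes X_train and Y_train from the
# earlier cells are in scope.
sample_batch, _ = next(datagen.flow(X_train, Y_train, batch_size=9))
plt.figure(figsize=(6, 6))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(sample_batch[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
plt.show()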
|
Digit Recognizer
|
4,566,279 |
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))<load_from_csv>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5, 5), activation='relu', padding='Same', input_shape =(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 32, kernel_size =(5, 5), activation='relu', padding='Same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(strides=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(strides=(2,2)))
model.add(Dropout(0.25))
|
Digit Recognizer
|
4,566,279 |
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')<count_missing_values>
|
model.add(Flatten())
model.add(Dense(units=1024, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=1024, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
|
Digit Recognizer
|
4,566,279 |
Null_train = train.isnull().sum()
Null_train[Null_train > 0]<drop_column>
|
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001), metrics=["accuracy"])
|
Digit Recognizer
|
4,566,279 |
train.drop(['Alley', 'PoolQC', 'Fence', 'MiscFeature', 'Id'], axis=1, inplace=True)<define_variables>
|
hist = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),
steps_per_epoch=1000,
epochs=25,
verbose=1,
validation_data=(X_val, Y_val))
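# Hedged sketch (addition): plot the recorded accuracy curves to check for
# over/underfitting. History key names differ across Keras versions
# ('acc'/'val_acc' vs 'accuracy'/'val_accuracy'), so both are handled.
acc_key = 'accuracy' if 'accuracy' in hist.history else 'acc'
plt.plot(hist.history[acc_key], label='train')
plt.plot(hist.history['val_' + acc_key], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()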
|
Digit Recognizer
|
4,566,279 |
Null_train_data = train[['LotFrontage','MasVnrType', 'MasVnrArea', 'FireplaceQu',
'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
'BsmtFinType2', 'Electrical', 'GarageType', 'GarageYrBlt',
'GarageFinish', 'GarageQual', 'GarageCond']]<count_unique_values>
|
final_loss, final_acc = model.evaluate(X_val, Y_val, verbose=0)
print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
|
Digit Recognizer
|
4,566,279 |
def view_null_data(data):
return pd.DataFrame({"Data Type": data.dtypes, "Unique Count": data.apply(lambda x: x.nunique(), axis=0),
"Null Count": data.isnull().sum()})<feature_engineering>
|
Y_hat = model.predict(X_val)
Y_hat[0]
|
Digit Recognizer
|
4,566,279 |
train['LotFrontage'] = train['LotFrontage'].fillna(train.LotFrontage.mean())
train['GarageYrBlt'] = train['GarageYrBlt'].fillna(train.GarageYrBlt.mean())
train['MasVnrArea'] = train['MasVnrArea'].fillna(train.MasVnrArea.mode()[0])<count_missing_values>
|
Y_pred = np.argmax(Y_hat, axis=1)
Y_true = np.argmax(Y_val, axis=1)
|
Digit Recognizer
|
4,566,279 |
Null_test = test.isnull().sum()
Null_test[Null_test > 0]<drop_column>
|
cm = confusion_matrix(Y_true, Y_pred)
print(cm)
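# Hedged sketch (addition): the raw matrix printed above is easier to read as a
# heatmap; seaborn was imported earlier in this kernel.
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.xlabel('predicted digit')
plt.ylabel('true digit')
plt.show()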
|
Digit Recognizer
|
4,566,279 |
Id = test['Id']
test.drop(['Alley', 'PoolQC', 'Fence', 'MiscFeature', 'Id'], axis=1, inplace=True)<drop_column>
|
X_test = pd.read_csv(test_dir)
X_test = X_test.values.reshape(-1, 28, 28, 1)
X_test = X_test / 255.0
|
Digit Recognizer
|
4,566,279 |
Null_test_data = test[['MSZoning', 'LotFrontage', 'Utilities', 'Exterior1st',
'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'BsmtQual', 'FireplaceQu',
'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinSF1',
'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',
'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional',
'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual',
'GarageCars', 'GarageArea', 'GarageCond', 'SaleType']]<feature_engineering>
|
Y_hat = model.predict(X_test, verbose=1)
Y_pred = np.argmax(Y_hat, axis=1)
|
Digit Recognizer
|
4,566,279 |
test['LotFrontage'] = test['LotFrontage'].fillna(test['LotFrontage'].mean())
test['BsmtFinSF1'] = test['BsmtFinSF1'].fillna(test['BsmtFinSF1'].mean())
test['BsmtUnfSF'] = test['BsmtUnfSF'].fillna(test['BsmtUnfSF'].mean())
test['TotalBsmtSF'] = test['TotalBsmtSF'].fillna(test['TotalBsmtSF'].mean())
test['GarageYrBlt'] = test['GarageYrBlt'].fillna(test['GarageYrBlt'].mean())
test['GarageArea'] = test['GarageArea'].fillna(test['GarageArea'].mean())
test['MasVnrArea'] = test['MasVnrArea'].fillna(test['MasVnrArea'].mode()[0])
test['BsmtFullBath'] = test['BsmtFullBath'].fillna(test['BsmtFullBath'].mode()[0])
test['BsmtFinSF2'] = test['BsmtFinSF2'].fillna(test['BsmtFinSF2'].mode()[0])
test['BsmtHalfBath'] = test['BsmtHalfBath'].fillna(test['BsmtHalfBath'].mode()[0])<compute_test_metric>
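# Hedged check (addition): after the numeric fills above, only categorical
# columns should still contain NaNs; list whatever remains.
remaining = test.isnull().sum()
print(remaining[remaining > 0])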
|
display_image(pd.DataFrame(X_test.reshape(-1, 784)), Y_pred, n=10, label=True)
|
Digit Recognizer
|