kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
---|---|---|---|
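The columns above pair each kernel's preceding context (prompt) with the next code cell (completion) and the competition it came from (comp_name). A minimal sketch of how a dump with this schema could be inspected, assuming it has been exported to a hypothetical kernel_cells.parquet file:
import pandas as pd
# Hypothetical export path; the real file name/format of this dump is not given.
df = pd.read_parquet('kernel_cells.parquet',
                     columns=['kernel_id', 'prompt', 'completion', 'comp_name'])
# e.g. keep only the cells taken from the Digit Recognizer competition
digit_cells = df[df['comp_name'] == 'Digit Recognizer']
print(digit_cells['completion'].str.len().describe())  # distribution of completion lengths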
10,029,170 |
class Regressor(pl.LightningModule):
def __init__(self, input_size, output_size, params, model_path='models/'):
super(Regressor, self ).__init__()
dim_1 = params['dim_1']
dim_2 = params['dim_2']
dim_3 = params['dim_3']
dim_4 = params['dim_4']
self.dropout_prob = params['dropout']
self.lr = params['lr']
self.activation = params['activation']
self.input_size = input_size
self.output_size = output_size
self.loss = nn.BCEWithLogitsLoss()
self.weight_decay = params['weight_decay']
self.amsgrad = params['amsgrad']
self.label_smoothing = params['label_smoothing']
self.model_path = model_path
self.encoder = nn.Sequential(
nn.BatchNorm1d(input_size),
nn.Linear(input_size, dim_1, bias=False),
nn.BatchNorm1d(dim_1),
self.activation() ,
nn.Dropout(p=self.dropout_prob),
nn.Linear(dim_1, dim_2, bias=False),
nn.BatchNorm1d(dim_2),
self.activation() ,
nn.Dropout(p=self.dropout_prob),
nn.Linear(dim_2, dim_3, bias=False),
nn.BatchNorm1d(dim_3),
self.activation() ,
nn.Dropout(p=self.dropout_prob),
nn.Linear(dim_3, dim_4, bias=False),
nn.BatchNorm1d(dim_4),
self.activation() ,
nn.Dropout(p=self.dropout_prob),
nn.Linear(dim_4, self.output_size, bias=False)
)
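# Note: bias=False on the Linear layers because each is immediately followed by a
# BatchNorm1d, whose learned shift already plays the role of a bias term.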
def forward(self, x):
out = self.encoder(x)
return out
def training_step(self, batch, batch_idx):
x, y = batch['data'], batch['target']
x = x.view(x.size(1), -1)
y = y.view(y.size(1), -1)
logits = self(x)
loss = self.loss(input=logits, target=y)
logits = torch.sigmoid(logits)
auc_metric = roc_auc_score(y_true=y.cpu().numpy() ,
y_score=logits.cpu().detach().numpy())
self.log('train_auc', auc_metric, on_step=False,
on_epoch=True, prog_bar=True)
self.log('train_loss', loss, prog_bar=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
x, y = batch['data'], batch['target']
x = x.view(x.size(1), -1)
y = y.view(y.size(1), -1)
logits = self(x)
loss = self.loss(input=logits,
target=y)
logits = torch.sigmoid(logits)
auc = roc_auc_score(y_true=y.cpu().numpy() ,
y_score=logits.cpu().detach().numpy())
return {'loss': loss, 'y': y, 'logits': logits, 'auc': auc}
def validation_epoch_end(self, val_step_outputs):
epoch_loss = torch.tensor([x['loss'] for x in val_step_outputs] ).mean()
epoch_auc = torch.tensor([x['auc'] for x in val_step_outputs] ).mean()
self.log('val_loss', epoch_loss, prog_bar=True)
self.log('val_auc', epoch_auc, prog_bar=True)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outputs):
epoch_loss = torch.tensor([x['loss'] for x in outputs] ).mean()
epoch_auc = torch.tensor([x['auc'] for x in outputs] ).mean()
self.log('test_loss', epoch_loss)
self.log('test_auc', epoch_auc)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters() , lr=self.lr,
amsgrad=self.amsgrad)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, patience=5, factor=0.1, min_lr=1e-7, eps=1e-08
)
return {'optimizer': optimizer, 'lr_scheduler': scheduler, 'monitor': 'val_auc'}
def init_weights(m, func):
if type(m)== nn.Linear:
nn.init.xavier_normal_(m.weight, nn.init.calculate_gain(func))
def cross_val(p) -> object:
data_ = load_data(root_dir='./data/', mode='train')
data_, target_, features, date = preprocess_data(
data_, nn=True, action='multi')
input_size = data_.shape[-1]
output_size = target_.shape[-1]
gts = PurgedGroupTimeSeriesSplit(n_splits=5, group_gap=5)
models = []
tb_logger = pl_loggers.TensorBoardLogger('logs/multiclass_')
for i, (train_idx, val_idx) in enumerate(gts.split(data_, groups=date)):
idx = np.concatenate([train_idx, val_idx])
data = copy.deepcopy(data_[idx])
target = copy.deepcopy(target_[idx])
checkpoint_callback = pl.callbacks.ModelCheckpoint(
os.path.join('models/', 'multi_class_fold_{}'.format(i)) , monitor='val_auc', save_top_k=1, period=10,
mode='max'
)
model = Regressor(input_size, output_size, p)
if p['activation'] == nn.ReLU:
model.apply(lambda m: init_weights(m, 'relu'))
elif p['activation'] == nn.LeakyReLU:
model.apply(lambda m: init_weights(m, 'leaky_relu'))
train_idx = [i for i in range(0, max(train_idx)+ 1)]
val_idx = [i for i in range(len(train_idx), len(idx)) ]
data[train_idx] = calc_data_mean(
data[train_idx], './cache', train=True, mode='mean')
data[val_idx] = calc_data_mean(
data[val_idx], './cache', train=False, mode='mean')
dataset = FinData(data=data, target=target, date=date, multi=True)
dataloaders = create_dataloaders(
dataset, indexes={'train': train_idx, 'val': val_idx}, batch_size=p['batch_size'])
es = EarlyStopping(monitor='val_auc', patience=10,
min_delta=0.0005, mode='max')
trainer = pl.Trainer(logger=tb_logger,
max_epochs=1,
gpus=1,
callbacks=[checkpoint_callback, es],
precision=16)
trainer.fit(
model, train_dataloader=dataloaders['train'], val_dataloaders=dataloaders['val'])
torch.save(model.state_dict() , f'models/fold_{i}_state_dict.pth')
models.append(model)
return models, features<load_from_csv>
|
mnist_train.isna().any().any()
|
Digit Recognizer
|
10,029,170 |
def final_train(p, load=False):
data_ = load_data(root_dir='./data/', mode='train',overide='/kaggle/input/jane-street-market-prediction/train.csv')
data, target, features, date = preprocess_data(data_, nn=True)
dataset = FinData(data=data, target=target, date=date)
input_size = data.shape[-1]
output_size = 1
train_idx, val_idx = date[date <= 498].index.values.tolist() , date[date > 498].index.values.tolist()
data[train_idx] = calc_data_mean(data[train_idx], './cache', train=True)
data[val_idx] = calc_data_mean(data[val_idx], './cache', train=False)
checkpoint_callback = pl.callbacks.ModelCheckpoint(filepath='./cache/full_train',
monitor="val_auc", mode='max', save_top_k=1, period=10)
model = Regressor(input_size=input_size,
output_size=output_size, params=p)
if p['activation'] == nn.ReLU:
model.apply(lambda m: init_weights(m, 'relu'))
elif p['activation'] == nn.LeakyReLU:
model.apply(lambda m: init_weights(m, 'leaky_relu'))
dataset = FinData(data, target, date)
dataloaders = create_dataloaders(dataset, indexes={'train': train_idx, 'val': val_idx}, batch_size=p['batch_size'])
es = EarlyStopping(monitor='val_auc', patience=10,
min_delta=0.0005, mode='max')
trainer = pl.Trainer(max_epochs=25,
gpus=1,
callbacks=[checkpoint_callback, es],
precision=16)
trainer.fit(model, train_dataloader=dataloaders['train'], val_dataloaders=dataloaders['val'])
torch.save(model.state_dict() , './cache/final_train.pth')
return model, features<categorify>
|
mnist_train_data = mnist_train.loc[:, "pixel0":]
mnist_train_label = mnist_train.loc[:, "label"]
mnist_train_data = mnist_train_data/255.0
mnist_test = mnist_test/255.0
|
Digit Recognizer
|
10,029,170 |
def fillna_npwhere(array, values):
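# Replace NaNs with the matching entries of `values` (here the cached feature means):
# nan_to_num() zeroes the NaNs, and isnan(array) * values adds the fill value back
# in exactly those positions, leaving non-NaN entries unchanged.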
if np.isnan(array.sum()):
array = np.nan_to_num(array)+ np.isnan(array)* values
return array
def test_model(models, features, cache_dir='cache'):
env = janestreet.make_env()
iter_test = env.iter_test()
if type(models)== list:
models = [model.eval() for model in models]
else:
models.eval()
f_mean = np.load(f'{cache_dir}/f_mean.npy')
for (test_df, sample_prediction_df) in tqdm(iter_test):
if test_df['weight'].item() > 0:
vals = torch.FloatTensor(
fillna_npwhere(test_df[features].values, f_mean))
if type(models)== list:
preds = [torch.sigmoid(model.forward(vals.view(1, -1)) ).detach().numpy()
for model in models]
pred = np.mean(np.mean(preds, axis=1))
else:
pred = torch.sigmoid(models.forward(vals.view(1, -1)) ).item()
sample_prediction_df.action = np.where(
pred > 0.502, 1, 0 ).astype(int ).item()
else:
sample_prediction_df.action = 0
env.predict(sample_prediction_df )<init_hyperparams>
|
standardized_scalar = StandardScaler()
standardized_data = standardized_scalar.fit_transform(mnist_train_data)
standardized_data.shape
|
Digit Recognizer
|
10,029,170 |
def main(train=True):
p = {'dim_1': 167,
'dim_2': 454,
'dim_3': 371,
'dim_4': 369,
'dim_5': 155,
'activation': nn.LeakyReLU,
'dropout': 0.21062362698532755,
'lr': 0.0022252024054478523,
'label_smoothing': 0.05564974140461841,
'weight_decay': 0.04106097088288333,
'amsgrad': True,
'batch_size': 10072}
if train:
models, features = final_train(p, load=False)
else:
data_ = load_data(root_dir='./data/', mode='train',overide='/kaggle/input/jane-street-market-prediction/train.csv')
data_, target_, features, date = preprocess_data(data_, nn=True)
model_path = '/kaggle/input/model-states'
f_mean = calc_data_mean(data_, 'cache')
models = load_model(model_path, data_.shape[-1], 5, p, False)
test_model(models, features)
return models
<define_variables>
|
cov_matrix = np.matmul(standardized_data.T, standardized_data)
cov_matrix.shape
|
Digit Recognizer
|
10,029,170 |
pca_components = 60<choose_model_class>
|
lambdas, vectors = eigh(cov_matrix, eigvals=(782, 783))
vectors.shape
|
Digit Recognizer
|
10,029,170 |
e_size = 64
fc_input = pca_components
h_dims = [512,512,256,128]
dropout_rate = 0.5
epochs = 200
minibatch_size = 100000
class MarketPredictor(nn.Module):
def __init__(self):
super(MarketPredictor, self ).__init__()
self.e = nn.Embedding(2,e_size)
self.deep = nn.Sequential(
nn.Linear(fc_input,h_dims[0]),
nn.BatchNorm1d(h_dims[0]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[0],h_dims[1]),
nn.BatchNorm1d(h_dims[1]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[1],h_dims[2]),
nn.BatchNorm1d(h_dims[2]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[2],h_dims[3]),
nn.BatchNorm1d(h_dims[3]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[3],e_size),
nn.BatchNorm1d(e_size),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate)
)
self.reduce = nn.utils.weight_norm(nn.Linear(e_size,5))
def forward(self,xi,xf):
e_out = self.e(xi)
f_out = self.deep(xf)
ef_out = self.reduce(e_out+f_out)
return ef_out
<load_pretrained>
|
new_coordinates = np.matmul(vectors, standardized_data.T)
print(new_coordinates.shape)
new_coordinates = np.vstack(( new_coordinates, mnist_train_label)).T
|
Digit Recognizer
|
10,029,170 |
epochs = 200
path = '/kaggle/input/pytorch-nn-model-more-feature-engineering/marketpredictor_state_dict_'+str(epochs)+'epochs.pt'
model = MarketPredictor()
model.load_state_dict(torch.load(path,map_location=dev))
model.to(dev)
model.eval()<load_pretrained>
|
df_new = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_new.head()
|
Digit Recognizer
|
10,029,170 |
with open('/kaggle/input/pytorch-nn-model-more-feature-engineering/feature_processing.pkl', 'rb') as f:
sc, pca, maxindex, fill_val, remove_names = pickle.load(f)<define_variables>
|
pca = decomposition.PCA()
pca.n_components = 2
pca_data = pca.fit_transform(standardized_data)
pca_data.shape
|
Digit Recognizer
|
10,029,170 |
feature_names = ['feature_'+str(i) for i in range(1,130)]
exclude = np.where([maxindex[i,1] > 100 and maxindex[i,2] > 1 for i in range(129)])[0]
keep = np.where([(feature_names[i] != remove_names).all() for i in range(129)])[0]<split>
|
pca_data = np.vstack(( pca_data.T, mnist_train_label)).T
|
Digit Recognizer
|
10,029,170 |
env = janestreet.make_env()
iter_test = env.iter_test()<data_type_conversions>
|
df_PCA = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"])
df_PCA.head()
|
Digit Recognizer
|
10,029,170 |
for (test_df, sample_prediction_df) in iter_test:
if test_df['weight'].item() == 0:
sample_prediction_df.action = 0
else:
test_df_features = test_df[feature_names].to_numpy()
for i in exclude:
if test_df_features[0,i] == maxindex[i,0]:
test_df_features[0,i] = fill_val[i]
test_df_int_features = test_df['feature_0'].to_numpy()
nans = np.isnan(test_df_features)
for i in range(129):
if nans[0,i]:
test_df_features[0,i] = fill_val[i]
test_df_features = test_df_features[:,keep]
test_df_features_scaled = sc.transform(test_df_features)
test_df_features_pca=pca.transform(test_df_features_scaled)
itensor = torch.tensor(( test_df_int_features+1)//2,dtype=torch.long,device=dev)
ftensor = torch.tensor(test_df_features_pca,dtype=torch.float,device=dev)
s = model(itensor,ftensor)[0,0].item()
sample_prediction_df.action = int(np.round(1/(1+np.exp(-s))))
env.predict(sample_prediction_df)
<choose_model_class>
|
mnist_train_data = np.array(mnist_train_data)
mnist_train_label = np.array(mnist_train_label )
|
Digit Recognizer
|
10,029,170 |
e_size = 64
fc_input = 130
h_dims = [512,512,256,128]
dropout_rate = 0.5
epochs = 2000
minibatch_size = 100000
class MarketPredictor(nn.Module):
def __init__(self):
super(MarketPredictor, self ).__init__()
self.deep = nn.Sequential(
nn.Linear(fc_input,h_dims[0]),
nn.BatchNorm1d(h_dims[0]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[0],h_dims[1]),
nn.BatchNorm1d(h_dims[1]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[1],h_dims[2]),
nn.BatchNorm1d(h_dims[2]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[2],h_dims[3]),
nn.BatchNorm1d(h_dims[3]),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate),
nn.Linear(h_dims[3],e_size),
nn.BatchNorm1d(e_size),
nn.LeakyReLU() ,
nn.Dropout(dropout_rate)
)
self.reduce = nn.utils.weight_norm(nn.Linear(e_size,5))
def forward(self,xf):
f_out = self.deep(xf)
ef_out = self.reduce(f_out)
return ef_out
<load_pretrained>
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPool2D, AvgPool2D
from tensorflow.keras.optimizers import Adadelta
from keras.utils.np_utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import LearningRateScheduler
|
Digit Recognizer
|
10,029,170 |
path = '/kaggle/input/pytorch-nn-model-w-o-feature-reduction/marketpredictor_state_dict_'+str(epochs)+'epochs.pt'
model = MarketPredictor()
model.load_state_dict(torch.load(path,map_location=dev))
model.to(dev)
model.eval()<load_pretrained>
|
nclasses = mnist_train_label.max() - mnist_train_label.min() + 1
mnist_train_label = to_categorical(mnist_train_label, num_classes = nclasses)
print("Shape of ytrain after encoding: ", mnist_train_label.shape )
|
Digit Recognizer
|
10,029,170 |
with open('/kaggle/input/pytorch-nn-model-w-o-feature-reduction/feature_processing.pkl', 'rb') as f:
sc, maxindex, fill_val = pickle.load(f)<define_variables>
|
def build_model(input_shape=(28, 28, 1)) :
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape = input_shape))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size = 4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
def compile_model(model, optimizer='adam', loss='categorical_crossentropy'):
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
def train_model(model, train, test, epochs, split):
history = model.fit(train, test, shuffle=True, epochs=epochs, validation_split=split)
return history
|
Digit Recognizer
|
10,029,170 |
feature_names = ['feature_'+str(i) for i in range(130)]
exclude = np.where([maxindex[i,1] > 100 and maxindex[i,2] > 1 for i in range(129)])[0]<split>
|
cnn_model = build_model(( 28, 28, 1))
compile_model(cnn_model, 'adam', 'categorical_crossentropy')
model_history = train_model(cnn_model, mnist_train_data, mnist_train_label, 80, 0.2 )
|
Digit Recognizer
|
10,029,170 |
env = janestreet.make_env()
iter_test = env.iter_test()<data_type_conversions>
|
predictions = cnn_model.predict(mnist_test_arr )
|
Digit Recognizer
|
10,029,170 |
for (test_df, sample_prediction_df) in iter_test:
if test_df['weight'].item() == 0:
sample_prediction_df.action = 0
else:
test_df_features = test_df[feature_names].to_numpy()
for i in exclude:
if test_df_features[0,i+1] == maxindex[i,0]:
test_df_features[0,i+1] = fill_val[i]
nans = np.isnan(test_df_features)
for i in range(130):
if nans[0,i]:
test_df_features[0,i] = fill_val[i]
test_df_features = sc.transform(test_df_features)
ftensor = torch.tensor(test_df_features,dtype=torch.float,device=dev)
s = model(ftensor)[0,0].item()
sample_prediction_df.action = int(np.round(1/(1+np.exp(-s))))
env.predict(sample_prediction_df )<import_modules>
|
predictions_test = []
for i in predictions:
predictions_test.append(np.argmax(i))
|
Digit Recognizer
|
10,029,170 |
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Dropout, Concatenate, Lambda, GaussianNoise, Activation
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers.experimental.preprocessing import Normalization
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
import numpy as np
import pandas as pd
from tqdm import tqdm
from random import choices
import dask.dataframe as dd
from glob import glob
import os<choose_model_class>
|
submission = pd.DataFrame({
"ImageId": mnist_test.index+1,
"Label": predictions_test
})
submission.to_csv('my_submission.csv', index=False )
|
Digit Recognizer
|
8,460,609 |
def create_mlp(
num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate
):
inp = tf.keras.layers.Input(shape=(num_columns,))
x = tf.keras.layers.BatchNormalization()(inp)
x = tf.keras.layers.Dropout(dropout_rates[0] )(x)
for i in range(len(hidden_units)) :
x = tf.keras.layers.Dense(hidden_units[i] )(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(tf.keras.activations.swish )(x)
x = tf.keras.layers.Dropout(dropout_rates[i + 1] )(x)
x = tf.keras.layers.Dense(num_labels )(x)
out = tf.keras.layers.Activation("sigmoid" )(x)
model = tf.keras.models.Model(inputs=inp, outputs=out)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
metrics=tf.keras.metrics.AUC(name="AUC"),
)
return model<prepare_x_and_y>
|
train_df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test_df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
submission_df = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv" )
|
Digit Recognizer
|
8,460,609 |
data = dd.read_parquet('../input/janestreetparquetdata/date*.parquet')
features = ['feature_{}'.format(i) for i in range(130)]
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
train = data.compute()
train = train.query('date > 85' ).reset_index(drop = True)
train = train[train['weight'] != 0]
f_mean = train[features].mean().values
train = train.dropna()
X_train = train[features].values
y_train =(train[resp_cols]> 0 ).astype(int ).values<init_hyperparams>
|
X_train = train_df.iloc[:, 1:].values
y_train = train_df.iloc[:, 0].values
X_test = test_df.values
print(f"X_train shape: {X_train.shape}")
print(f"y_train shape: {y_train.shape}")
print(f"X_test shape: {X_test.shape}" )
|
Digit Recognizer
|
8,460,609 |
SEED = 1111
tf.random.set_seed(SEED)
np.random.seed(SEED)
hidden_units = [150, 150, 150]
dropout_rates = [0.2, 0.2, 0.2, 0.2]
label_smoothing = 1e-2
learning_rate = 1e-3
epochs = 250
batch_size = 5000
save_every_n_epochs = 10
save_freq =(len(X_train)//batch_size)*save_every_n_epochs
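# In tf.keras, an integer save_freq on ModelCheckpoint counts batches, so saving
# every save_every_n_epochs epochs means steps_per_epoch (len(X_train) // batch_size)
# times save_every_n_epochs batches.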
clf = create_mlp(
len(features), len(resp_cols), hidden_units, dropout_rates, label_smoothing, learning_rate
)
checkpoint_path = "./cp-{epoch:04d}.ckpt"
checkpoint = ModelCheckpoint(checkpoint_path,save_weights_only=True,save_freq=save_freq)
clf.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,callbacks=[checkpoint])
clf.save_weights('./SimpleMLP.ckpt' )<define_search_space>
|
X_train_combined = np.r_[X_train, X_train_add]
y_train_combined = np.r_[y_train, y_train_add]
del X_train
del X_train_add
del y_train
del y_train_add
print(f"X_train_combined shape: {X_train_combined.shape}")
print(f"y_train_combined shape: {y_train_combined.shape}" )
|
Digit Recognizer
|
8,460,609 |
class TrainData(Dataset):
def __init__(self,file_name,root_dir,predict=False):
self.file_name = file_name
self.root_dir = root_dir
self.feature = ['feature_{}'.format(i)for i in range(130)]
self.resp = ['resp_{}'.format(i)for i in range(1,5)]+['resp']
self.prediction = predict
def __len__(self):
return len(glob(os.path.join(self.root_dir,'*.parquet')))
def __getitem__(self, idx):
data = pd.read_parquet(os.path.join(self.root_dir,self.file_name+'_{}.parquet'.format(idx)))
X = data[self.feature].values
W = data['weight'].values
R = data['resp'].values
return X,W,R<categorify>
|
class ImageReshaper(BaseEstimator, TransformerMixin):
def __init__(self, shape):
self.shape = shape
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X.reshape(self.shape )
|
Digit Recognizer
|
8,460,609 |
dataset = TrainData('date', '../input/janestreetparquetdata/', predict=True)
weight_path = glob('./*.ckpt.index')
weight_path = [os.path.basename(each ).split('.')[0] for each in weight_path]
weight_path.sort()
for path in weight_path:
clf.load_weights('./{}.ckpt'.format(path))
p = []
for i in range(len(dataset)) :
net_input,weight,resp= dataset[i]
net_input = np.nan_to_num(net_input)+f_mean*(np.isnan(net_input ).astype(int))
net_prediction = clf.predict(net_input)
pre =(np.median(net_prediction,axis=1)>0.5 ).astype(int)
p.append(( weight*resp*pre ).sum())
result = pd.DataFrame(data={'p':p})
result ['p2'] = result['p']**2
t =(result['p'].sum() /np.sqrt(result['p2'].sum())) *(np.sqrt(250/len(dataset)))
print(path,min(max(t,0),6)*result['p'].sum() )<load_pretrained>
|
def build_lenet5_model() :
model = Sequential()
model.add(Conv2D(6, kernel_size=5, activation='relu',
input_shape=(28,28,1)))
model.add(MaxPooling2D())
model.add(Conv2D(16, kernel_size=5, activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(400, activation='relu'))
model.add(Dense(120, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
|
Digit Recognizer
|
8,460,609 |
selection = 'cp-0200'
clf.load_weights('./{}.ckpt'.format(selection))<predict_on_test>
|
def build_custom_lenet5_model() :
model = Sequential()
model.add(Conv2D(32,kernel_size=3,activation='relu',input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=3,activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64,kernel_size=3,activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64,kernel_size=3,activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
|
Digit Recognizer
|
8,460,609 |
env = janestreet.make_env()
th = 0.5
for (test_df, pred_df) in tqdm(env.iter_test()):
if test_df['weight'].item() > 0:
x_tt = test_df.loc[:, features].values
x_tt = np.nan_to_num(x_tt)+f_mean*(np.isnan(x_tt ).astype(int))
pred = np.median(clf(x_tt, training=False))
pred_df.action = np.where(pred >= th, 1, 0 ).astype(int)
else:
pred_df.action = 0
env.predict(pred_df )<load_pretrained>
|
stratified_fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold, indices in enumerate(stratified_fold.split(X_train_combined, y_train_combined)) :
X_train_, y_train_ = X_train_combined[indices[0]], y_train_combined[indices[0]]
X_test_, y_test_ = X_train_combined[indices[1]], y_train_combined[indices[1]]
model_pipeline = Pipeline([
('min_max_scaler', MinMaxScaler()),
('image_reshaper', ImageReshaper(shape=(-1, 28, 28, 1))),
('model', KerasClassifier(build_lenet5_model, epochs=5, batch_size=32))
])
model_pipeline.fit(X_train_, y_train_)
predictions = model_pipeline.predict(X_test_)
print(f"Classification report for Fold {fold + 1}:")
print(classification_report(y_test_, predictions, digits=3), end="\n")
print(f"Confusion Matrix for Fold {fold + 1}:")
print(confusion_matrix(y_test_, predictions), end="\n")
del X_train_
del X_test_
del y_train_
del y_test_
|
Digit Recognizer
|
8,460,609 |
SEED = 1111
inference = False
cv = False
tf.random.set_seed(SEED)
np.random.seed(SEED)
train_pickle_file = '/kaggle/input/pickling/train.csv.pandas.pickle'
train = pickle.load(open(train_pickle_file, 'rb'))
train = train.query('date > 85' ).reset_index(drop = True)
train = train[train['weight'] != 0]
train.fillna(train.mean() ,inplace=True)
train['action'] =(( train['resp'].values)> 0 ).astype(int)
train['bias'] = 1
features = [c for c in train.columns if "feature" in c]<normalization>
|
lenet5_model = Pipeline([
('min_max_scaler', MinMaxScaler()),
('image_reshaper', ImageReshaper(shape=(-1, 28, 28, 1))),
('model', KerasClassifier(build_lenet5_model, epochs=5, batch_size=32))
])
custom_lenet5_model = Pipeline([
('min_max_scaler', MinMaxScaler()),
('image_reshaper', ImageReshaper(shape=(-1, 28, 28, 1))),
('model', KerasClassifier(build_custom_lenet5_model, epochs=20, batch_size=32))
])
lenet5_model.fit(X_train_combined, y_train_combined)
lenet5_model_predictions = lenet5_model.predict_proba(X_test)
custom_lenet5_model.fit(X_train_combined, y_train_combined)
custom_lenet5_model_predictions = custom_lenet5_model.predict_proba(X_test )
|
Digit Recognizer
|
8,460,609 |
def build_neutralizer(train, features, proportion, return_neut=False):
neutralizer = {}
neutralized_features = np.zeros(( train.shape[0], len(features)))
target = train[['resp', 'bias']].values
for i, f in enumerate(features):
feature = train[f].values.reshape(-1, 1)
coeffs = np.linalg.lstsq(target, feature)[0]
neutralized_features[:, i] =(feature -(proportion * target.dot(coeffs)) ).squeeze()
neutralizer = np.linalg.lstsq(train[features+['bias']].values, neutralized_features)[0]
if return_neut:
return neutralized_features, neutralizer
else:
return neutralizer
def neutralize_array(array, neutralizer):
neutralized_array = array.dot(neutralizer)
return neutralized_array
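# Feature neutralisation: each feature is regressed (least squares) onto [resp, bias]
# and `proportion` of that projection is subtracted, removing the component of the
# feature that is linearly explained by the response. A second least-squares fit then
# maps the raw [features, bias] matrix onto the neutralised features, so at inference
# time neutralize_array() only needs a single matrix product.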
def test_neutralization() :
dummy_train = train.loc[:100000, :]
proportion = 1.0
neutralized_features, neutralizer = build_neutralizer(dummy_train, features, proportion, True)
dummy_neut_train = neutralize_array(dummy_train[features+['bias']].values, neutralizer)
print(neutralized_features[0, :10], dummy_neut_train[0, :10])
test_neutralization()<feature_engineering>
|
predictions = lenet5_model_predictions + custom_lenet5_model_predictions
predictions = np.argmax(predictions, axis=1 )
|
Digit Recognizer
|
8,460,609 |
<prepare_x_and_y><EOS>
|
submission_df["Label"] = predictions
submission_df.to_csv('submissions.csv', index=False)
FileLink('submissions.csv' )
|
Digit Recognizer
|
1,425,655 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
|
%matplotlib inline
|
Digit Recognizer
|
1,425,655 |
def create_mlp(
num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate
):
inp = tf.keras.layers.Input(shape=(num_columns,))
x = tf.keras.layers.BatchNormalization()(inp)
x = tf.keras.layers.Dropout(dropout_rates[0] )(x)
for i in range(len(hidden_units)) :
x = tf.keras.layers.Dense(hidden_units[i] )(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(tf.keras.activations.swish )(x)
x = tf.keras.layers.Dropout(dropout_rates[i + 1] )(x)
x = tf.keras.layers.Dense(num_labels )(x)
out = tf.keras.layers.Activation("sigmoid" )(x)
model = tf.keras.models.Model(inputs=inp, outputs=out)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
metrics=tf.keras.metrics.AUC(name="AUC"),
)
return model
batch_size = 5000
hidden_units = [150, 150, 150]
dropout_rates = [0.2, 0.2, 0.2, 0.2]
label_smoothing = 1e-2
learning_rate = 1e-3
epochs = 200
clf = create_mlp(
len(features), 5, hidden_units, dropout_rates, label_smoothing, learning_rate
)
if inference:
clf = keras.models.load_model('keras_nn')
else:
clf.fit(X, y, epochs=epochs, batch_size=5000)
clf.save('keras_nn')
models = []
models.append(clf)
th = 0.5000
f = np.median
models = models[-3:]
<split>
|
IMG_ROWS = 28
IMG_COLS = 28
NUM_CLASSES = 10
TEST_SIZE = 0.1
RANDOM_STATE = 2018
NO_EPOCHS = 150
PATIENCE = 20
VERBOSE = 1
BATCH_SIZE = 128
IS_LOCAL = False
if(IS_LOCAL):
PATH=".. /input/digit-recognizer/"
else:
PATH=".. /input/"
print(os.listdir(PATH))
|
Digit Recognizer
|
1,425,655 |
if cv:
oof_probas = np.zeros(y.shape)
val_idx_all = []
N_SPLITS = 5
gkf = GroupKFold(n_splits=N_SPLITS)
for fold, (train_idx, val_idx) in enumerate(gkf.split(train.action.values, groups=train.date.values)):
X_train, X_val = X.iloc[train_idx], X.iloc[val_idx].values
y_train, y_val = y[train_idx], y[val_idx]
clf.fit(X_train, y_train, epochs=epochs, batch_size=5000)
oof_probas[val_idx] += clf(X_val, training=False ).numpy()
score = roc_auc_score(y_val, oof_probas[val_idx])
print(f'FOLD {fold} ROC AUC:\t {score}')
del X_train, X_val, y_train, y_val
gc.collect()
val_idx_all.append(val_idx)
val_idx = np.concatenate(val_idx_all)
<compute_test_metric>
|
train_file = PATH+"train.csv"
test_file = PATH+"test.csv"
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file )
|
Digit Recognizer
|
1,425,655 |
if cv:
auc_oof = roc_auc_score(y[val_idx], oof_probas[val_idx])
print(auc_oof )<compute_test_metric>
|
print("MNIST train - rows:",train_df.shape[0]," columns:", train_df.shape[1])
print("MNIST test - rows:",test_df.shape[0]," columns:", test_df.shape[1] )
|
Digit Recognizer
|
1,425,655 |
def determine_action(df, thresh):
action =(df.weight * df.resp > thresh ).astype(int)
return action
def date_weighted_resp(df):
cols = ['weight', 'resp', 'action']
weighted_resp = np.prod(df[cols], axis=1)
return weighted_resp.sum()
def calculate_t(dates_p):
e_1 = dates_p.sum() / np.sqrt(( dates_p**2 ).sum())
e_2 = np.sqrt(250/np.abs(len(dates_p)))
return e_1 * e_2
def calculate_u(df, thresh):
df = df.copy()
dates_p = df.groupby('date' ).apply(date_weighted_resp)
t = calculate_t(dates_p)
return t, min(max(t, 0), 6)* dates_p.sum()
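# The two helpers above implement the competition's utility metric: with the per-date
# profits p_i = sum(weight * resp * action), the Sharpe-like term is
# t = sum(p_i) / sqrt(sum(p_i**2)) * sqrt(250 / |i|), and the utility is
# u = min(max(t, 0), 6) * sum(p_i), i.e. t is only rewarded up to a cap of 6.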
def plot_roc_curve(fpr, tpr, label=None):
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], 'k--', label='Random')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.grid()
def plot_precision_recall_curve(precisions, recalls, thresholds):
plt.figure(figsize=(8, 6))
plt.plot(thresholds, precisions[:-1], 'b--', label='Precision')
plt.plot(thresholds, recalls[:-1], 'g-', label='Recall')
plt.xlabel('Thresholds')
plt.legend(loc='lower left')
plt.grid()
def plot_thresh_u_t(df, oof):
threshs = np.linspace(0, 1, 1000)
ts = []
us = []
for thresh in threshs:
df['action'] = np.where(oof >= thresh, 1, 0)
t, u = calculate_u(df, thresh)
ts.append(t)
us.append(u)
ts = np.array(ts)
us = np.array(us)
ts = np.where(np.isnan(ts), 0.0, ts)
us = np.where(np.isnan(us), 0.0, us)
tmax = np.argmax(ts)
umax = np.argmax(us)
print(f'Max Utility Score: {us[umax]}')
fig, axes = plt.subplots(1, 2, figsize=(14, 4))
axes[0].plot(threshs, ts)
axes[0].set_title('Different t scores by threshold')
axes[0].set_xlabel('Threshold')
axes[0].axvline(threshs[tmax])
axes[1].plot(threshs, us)
axes[1].set_title('Different u scores by threshold')
axes[1].set_xlabel('Threshold')
axes[1].axvline(threshs[umax], color='r', linestyle='--', linewidth=1.2)
print(f'Optimal Threshold: {threshs[umax]}')
return threshs[umax]
<feature_engineering>
|
def get_classes_distribution(data):
label_counts = data["label"].value_counts()
total_samples = len(data)
for i in range(len(label_counts)) :
label = label_counts.index[i]
count = label_counts.values[i]
percent =(count / total_samples)* 100
print("{}: {} or {}%".format(label, count, percent))
get_classes_distribution(train_df )
|
Digit Recognizer
|
1,425,655 |
env = janestreet.make_env()
for (test_df, pred_df) in tqdm(env.iter_test()):
if test_df['weight'].item() > 0:
x_tt = test_df.loc[:, features].values
if np.isnan(x_tt[:, 1:].sum()):
x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* f_mean
x_tt = np.append(x_tt, [[1]], axis=1)
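# the appended constant 1 acts as the 'bias' column that build_neutralizer() was fit with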
x_tt = neutralize_array(x_tt, neutralizer)
pred = np.mean([model(x_tt, training = False ).numpy() for model in models],axis=0)
pred = f(pred)
pred_df.action = np.where(pred >= opt_thresh, 1, 0 ).astype(int)
else:
pred_df.action = 0
env.predict(pred_df )<install_modules>
|
def sample_images_data(data, hasLabel=True):
sample_images = []
sample_labels = []
if(hasLabel):
for k in range(0,10):
samples = data[data["label"] == k].head(4)
for j, s in enumerate(samples.values):
img = np.array(samples.iloc[j, 1:] ).reshape(IMG_ROWS,IMG_COLS)
sample_images.append(img)
sample_labels.append(samples.iloc[j, 0])
else:
samples = data.iloc[random.sample(range(1, 10000), 40),]
for j, s in enumerate(samples.values):
img = np.array(samples.iloc[j, 0:] ).reshape(IMG_ROWS,IMG_COLS)
sample_images.append(img)
sample_labels.append(-1)
print("Total number of sample images to plot: ", len(sample_images))
return sample_images, sample_labels
train_sample_images, train_sample_labels = sample_images_data(train_df )
|
Digit Recognizer
|
1,425,655 |
def install(package):
subprocess.check_call([sys.executable, "-m", "pip","install",package])
install(".. /input/fastremap/fastremap-1.10.2-cp37-cp37m-manylinux1_x86_64.whl")
install(".. /input/fillvoids/fill_voids-2.0.0-cp37-cp37m-manylinux1_x86_64.whl")
install(".. /input/finalmask")
install("pydicom" )<set_options>
|
def data_preprocessing(raw, hasLabel=True):
start_pixel = 0
if(hasLabel):
start_pixel = 1
if(hasLabel):
out_y = keras.utils.to_categorical(raw.label, NUM_CLASSES)
else:
out_y = None
num_images = raw.shape[0]
x_as_array = raw.values[:,start_pixel:]
x_shaped_array = x_as_array.reshape(num_images, IMG_ROWS, IMG_COLS, 1)
out_x = x_shaped_array / 255
return out_x, out_y
|
Digit Recognizer
|
1,425,655 |
sns.set(style="whitegrid")
sns.set_context("paper")
<define_variables>
|
X, y = data_preprocessing(train_df)
X_test, y_test = data_preprocessing(test_df,hasLabel=False )
|
Digit Recognizer
|
1,425,655 |
def seed_everything(seed=2020):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
seed_everything(42 )<load_from_csv>
|
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE )
|
Digit Recognizer
|
1,425,655 |
ROOT = ".. /input/osic-pulmonary-fibrosis-progression"
train=pd.read_csv(f"{ROOT}/train.csv")
train.head()<load_from_csv>
|
print("MNIST train - rows:",X_train.shape[0]," columns:", X_train.shape[1:4])
print("MNIST valid - rows:",X_val.shape[0]," columns:", X_val.shape[1:4])
print("MNIST test - rows:",X_test.shape[0]," columns:", X_test.shape[1:4] )
|
Digit Recognizer
|
1,425,655 |
sample_submission=pd.read_csv(f"{ROOT}/sample_submission.csv")
test=pd.read_csv(f"{ROOT}/test.csv")
test.head()<merge>
|
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu', padding="same",
kernel_initializer='he_normal',input_shape=(IMG_ROWS, IMG_COLS, 1)))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu'))
model.add(MaxPooling2D(( 2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size=(3, 3), strides=2,padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3, 3), strides=2,padding='same', activation='relu'))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(NUM_CLASSES, activation='softmax'))
|
Digit Recognizer
|
1,425,655 |
train['Patient_Week']=train['Patient']+'_'+train['Weeks'].astype(str)
lists=train['Patient_Week'][train.duplicated(['Patient_Week'], keep=False)].unique().tolist()
for patient_week in lists:
new_row=train.loc[train['Patient_Week']==patient_week].groupby(['Patient','Weeks','Age','Sex','SmokingStatus','Patient_Week'] ).mean().copy()
train=train[train['Patient_Week']!=patient_week]
train=train.append(new_row.reset_index())
add=train.copy()
add.rename(columns={'Weeks':'base_weeks','FVC':'base_fvc'},inplace=True)
final=train.merge(add,on='Patient')
final.drop(['Patient_Week_x','Age_y','Sex_y','SmokingStatus_y','Percent_y'],axis=1,inplace=True)
final.rename(columns={'Weeks':'base_week','FVC':'base_fvc','base_fvc':'FVC','Percent_x':'base_percent','Patient_Week_y':'Patient_Week','Age_x':'Age','Sex_x':'sex','SmokingStatus_x':'smokingstatus','base_weeks':'predict_week'},inplace=True)
final['weeks_passed']=final['predict_week']-final['base_week']
cols=['Patient','Patient_Week', 'base_week', 'base_fvc', 'base_percent', 'Age', 'sex','smokingstatus','predict_week','weeks_passed', 'FVC']
final=final[cols]
final=final.loc[final['weeks_passed']!=0]
final.reset_index(drop=True,inplace=True)
final.head()
<feature_engineering>
|
model.compile(loss = "categorical_crossentropy", optimizer="adam", metrics=["accuracy"] )
|
Digit Recognizer
|
1,425,655 |
test.rename(columns={'Weeks': 'base_Week', 'FVC': 'base_FVC', 'Percent': 'base_Percent', 'Age': 'base_Age'},inplace=True)
Week=sample_submission['Patient_Week'].apply(lambda x : x.split('_')[1] ).unique()
Week=np.tile(Week, len(test['Patient']))
test=test.loc[test.index.repeat(146)].reset_index(drop=True)
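# 146 = number of prediction weeks per patient in the sample submission (weeks -12 through 133)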
test['predict_week']=Week
test['Patient_Week']=test['Patient']+'_'+test['predict_week']
test['weeks_passed']=test['predict_week'].astype(int)-test['base_Week'].astype(int)
test.rename(columns={'base_Week':'base_week','base_FVC':'base_fvc','base_Percent':'base_percent','base_Age':'Age','Sex':'sex','SmokingStatus':'smokingstatus'},inplace=True)
cols=['Patient','Patient_Week','base_week','base_fvc','base_percent','Age','sex','smokingstatus','predict_week','weeks_passed']
test=test[cols]
print(test.shape)
test.head()<load_pretrained>
|
plot_model(model, to_file='model.png')
SVG(model_to_dot(model ).create(prog='dot', format='svg'))
|
Digit Recognizer
|
1,425,655 |
file_path = '../input/osic-pulmonary-fibrosis-progression/train/ID00007637202177411956430/10.dcm'
dataset = pydicom.dcmread(file_path)
<compute_test_metric>
|
NO_EPOCHS = 10
|
Digit Recognizer
|
1,425,655 |
def score(y_true, y_pred):
tf.dtypes.cast(y_true, tf.float32)
tf.dtypes.cast(y_pred, tf.float32)
sigma = abs(y_pred[:,2] - y_pred[:,0])
fvc_pred = y_pred[:,1]
sigma_clip = tf.maximum(sigma, 70)
delta = tf.abs(y_true[:, 0] - fvc_pred)
delta = tf.minimum(delta, 1000)
sq2 = tf.sqrt(tf.dtypes.cast(2, dtype=tf.float32))
metric =(delta / sigma_clip)*sq2 + tf.math.log(sigma_clip* sq2)
return metric
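# score() mirrors the OSIC Laplace log-likelihood metric (up to sign, as a loss to
# minimise): with sigma clipped at 70 and the FVC error clipped at 1000 it returns
# sqrt(2)*|FVC - FVC_pred|/sigma + ln(sqrt(2)*sigma). qloss() below is the pinball
# loss for the 0.8/0.5/0.2 quantiles, and mloss() blends the two with weight _lambda.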
def qloss(y_true, y_pred):
qs = [0.8,0.5,0.2]
q = tf.constant(np.array([qs]), dtype=tf.float32)
e = y_true - y_pred
v = tf.maximum(q*e,(q-1)*e)
return kb.mean(v)
def mloss(_lambda):
def loss(y_true, y_pred):
return _lambda * qloss(y_true, y_pred)+(1 - _lambda)*score(y_true, y_pred)
return loss<load_pretrained>
|
earlystopper = EarlyStopping(monitor='loss', patience=PATIENCE, verbose=VERBOSE)
checkpointer = ModelCheckpoint('best_model.h5',
monitor='val_acc',
verbose=VERBOSE,
save_best_only=True,
save_weights_only=True)
history = model.fit(X_train, y_train,
batch_size=BATCH_SIZE,
epochs=NO_EPOCHS,
verbose=1,
validation_data=(X_val, y_val),
callbacks=[earlystopper, checkpointer] )
|
Digit Recognizer
|
1,425,655 |
input_image = sitk.ReadImage('../input/osic-pulmonary-fibrosis-progression/train/ID00007637202177411956430/12.dcm')
segmentation = mask.apply(input_image)
plt.figure(figsize=(10,10))
plt.imshow(segmentation[0] )<load_from_csv>
|
print("run model - predict validation set")
score = model.evaluate(X_val, y_val, verbose=0)
print(f'Last validation loss: {score[0]}, accuracy: {score[1]}')
model_optimal = model
model_optimal.load_weights('best_model.h5')
score = model_optimal.evaluate(X_val, y_val, verbose=0)
print(f'Best validation loss: {score[0]}, accuracy: {score[1]}' )
|
Digit Recognizer
|
1,425,655 |
atten = pd.read_csv('../input/attent-1/atten(1).csv')
patients=os.listdir(f"{ROOT}/{how}")
avg_atten_test=[]
for patient in patients:
try:
mid=mid_image_test(patient,True,True)
postives=mid>mid.min()
mid[postives].mean()
avg_atten_test.append(mid[postives].mean())
except:
avg_atten_test.append(np.nan)
continue<prepare_x_and_y>
|
def predict_show_classes(model, X_val, y_val):
predicted_classes = model.predict_classes(X_val)
y_true = np.argmax(y_val,axis=1)
correct = np.nonzero(predicted_classes==y_true)[0]
incorrect = np.nonzero(predicted_classes!=y_true)[0]
print("Correct predicted classes:",correct.shape[0])
print("Incorrect predicted classes:",incorrect.shape[0])
target_names = ["Class {}:".format(i)for i in range(NUM_CLASSES)]
print(classification_report(y_true, predicted_classes, target_names=target_names))
return correct, incorrect
|
Digit Recognizer
|
1,425,655 |
final1=final.copy()
final1=final1.merge(atten,on='Patient')
X1=final1[['base_fvc','base_percent','Age','sex','smokingstatus','weeks_passed','avg_atten','base_week']].copy()
y1=final1.FVC.copy()<categorify>
|
correct, incorrect = predict_show_classes(model, X_val, y_val )
|
Digit Recognizer
|
1,425,655 |
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(X1[['sex','smokingstatus']])
encoded=pd.DataFrame(enc.transform(X1[['sex','smokingstatus']] ).toarray())
X1=X1.join(encoded)
X1.drop(['smokingstatus','sex'],axis=1,inplace=True)
scaler=preprocessing.MinMaxScaler().fit(X1)
X1=pd.DataFrame(scaler.transform(X1))
X1.head()<categorify>
|
correct, incorrect = predict_show_classes(model_optimal, X_val, y_val )
|
Digit Recognizer
|
1,425,655 |
atten_test=pd.DataFrame({'Patient':patients,'avg_atten':avg_atten_test})
atten_test['avg_atten']=atten_test['avg_atten'].fillna(atten["avg_atten"].mean())
X_test=test.merge(atten_test,on='Patient')
X_test=X_test[['base_fvc','base_percent','Age','sex','smokingstatus','weeks_passed','avg_atten','base_week']].copy()
encoded=pd.DataFrame(enc.transform(X_test[['sex','smokingstatus']] ).toarray())
X_test=X_test.join(encoded)
X_test.drop(['smokingstatus','sex'],axis=1,inplace=True)
X_test=pd.DataFrame(scaler.transform(X_test))
X1=X1.astype(np.float32)
y1=y1.astype(np.float32 )<choose_model_class>
|
y_cat = model.predict(X_test, batch_size=64 )
|
Digit Recognizer
|
1,425,655 |
inputs= keras.Input(shape=[11])
dense = layers.Dense(100, activation="relu")
x = dense(inputs)
x = layers.Dense(100, activation="relu" )(x)
output1 = layers.Dense(3,activation='linear' )(x)
model = keras.Model(inputs=inputs, outputs=output1)
model.summary()<train_model>
|
y_pred = np.argmax(y_cat,axis=1 )
|
Digit Recognizer
|
1,425,655 |
model.compile(loss=mloss(0.8),optimizer='adam',metrics=score)
model.fit(X1, y1,batch_size=512,epochs=130 )<predict_on_test>
|
output_file = "submission.csv"
with open(output_file, 'w') as f:
f.write('ImageId,Label\n')
for i in range(len(y_pred)):
f.write("".join([str(i+1), ',', str(y_pred[i]), '\n']))
|
Digit Recognizer
|
1,425,655 |
preds_high=model.predict(X_test)[:,0]
preds_low=model.predict(X_test)[:,2]
preds=model.predict(X_test)[:,1]<prepare_output>
|
y_cat = model_optimal.predict(X_test, batch_size=64)
y_pred = np.argmax(y_cat,axis=1)
output_file = "submission_optimal.csv"
with open(output_file, 'w') as f:
f.write('ImageId,Label\n')
for i in range(len(y_pred)):
f.write("".join([str(i+1), ',', str(y_pred[i]), '\n']))
|
Digit Recognizer
|
2,712,650 |
preds_set=pd.DataFrame({'preds_high':preds_high})
preds_set['preds']=preds
preds_set['preds_low']=preds_low
preds_set['sigma_pred']=abs(preds_set['preds_high']-preds_set['preds_low'])
preds_set.reset_index(inplace=True,drop=True)
preds_set<save_to_csv>
|
print(K.image_data_format() )
|
Digit Recognizer
|
2,712,650 |
submission=pd.DataFrame({'Patient_Week':test['Patient_Week'],'FVC': preds_set['preds'],'Confidence':preds_set['sigma_pred']})
submission['FVC']=submission['FVC'].apply(lambda x: round(x, 4)) /1
submission['Confidence']=submission['Confidence'].apply(lambda x: round(x, 4))
submission.to_csv('submission.csv',index=False)
submission.head()<load_pretrained>
|
(x_train, y_train), (x_test, y_test) = mnist.load_data()
|
Digit Recognizer
|
2,712,650 |
train_arc = zipfile.ZipFile('/kaggle/input/whats-cooking/train.json.zip','r')
train_data = pd.read_json(train_arc.read('train.json'))
train_data.head()<load_pretrained>
|
y_train = to_categorical(y_train, num_classes = 10)
y_test = to_categorical(y_test, num_classes=10 )
|
Digit Recognizer
|
2,712,650 |
test_arc = zipfile.ZipFile('/kaggle/input/whats-cooking/test.json.zip','r')
test_data = pd.read_json(test_arc.read('test.json'))
test_data.head()<count_values>
|
x_train.astype('float32')
x_test.astype('float32' )
|
Digit Recognizer
|
2,712,650 |
train_data['cuisine'].value_counts()<feature_engineering>
|
x_train = x_train/255
x_test = x_test/255
|
Digit Recognizer
|
2,712,650 |
train_ingredients_count = {}
for i in range(len(train_data)) :
for j in train_data['ingredients'][i]:
if j in train_ingredients_count.keys() :
train_ingredients_count[j]+=1
else:
train_ingredients_count[j] = 1<count_values>
|
input_shape =(28,28,1)
model = Sequential()
model.add(Conv2D(96,(3, 3),
padding='Same',
activation='relu',
input_shape=input_shape))
model.add(BatchNormalization())
model.add(Conv2D(96,(3, 3),
padding='Same',
activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(192,(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(256,(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(256,(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(256,(3,3), activation='relu', padding='same'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(units=10, activation='softmax'))
|
Digit Recognizer
|
2,712,650 |
train_ingredients_count['romaine lettuce']<feature_engineering>
|
call_back = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0, patience=5, verbose=0, restore_best_weights=True)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
factor=0.25,
verbose=1,
patience=2,
min_lr=0.000001 )
|
Digit Recognizer
|
2,712,650 |
test_ingredients_count = {}
for i in range(len(test_data)) :
for j in test_data['ingredients'][i]:
if j in test_ingredients_count.keys() :
test_ingredients_count[j]+=1
else:
test_ingredients_count[j] = 1<define_variables>
|
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.001),
metrics=['accuracy'])
train_datagen = ImageDataGenerator(shear_range=0.2,
zoom_range=0.2,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
height_shift_range=.1,
rotation_range=10,
width_shift_range=.1)
train_datagen.fit(x_train)
history = model.fit_generator(
train_datagen.flow(x_train,y_train, batch_size=128),
steps_per_epoch=x_train.shape[0] // 128,
epochs=32,
validation_data=(x_test,y_test),callbacks=[call_back, reduce_lr] )
|
Digit Recognizer
|
2,712,650 |
train_ingred_miss = []
for i in test_ingredients_count.keys() :
if i not in train_ingredients_count.keys() :
train_ingred_miss.append(i )<feature_engineering>
|
xtest = pd.read_csv("../input/test.csv")
|
Digit Recognizer
|
2,712,650 |
<define_variables><EOS>
|
submission = model.predict(xtest)
submission = np.argmax(submission, axis = 1)
submission = pd.Series(submission,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),submission],axis = 1)
submission.to_csv("submission.csv",index=False )
|
Digit Recognizer
|
2,188,530 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
|
tf.set_random_seed(42 )
|
Digit Recognizer
|
2,188,530 |
for i in test_ingred_miss:
test_ingredients_count[i] = 0
len(test_ingredients_count )<feature_engineering>
|
train_path = os.path.join('..', 'input', 'train.csv')
test_path = os.path.join('..', 'input', 'test.csv')
size = 28
lr = 0.001
num_classes = 10
epochs = 30
batch_size = 128
|
Digit Recognizer
|
2,188,530 |
for i in train_ingredients_count.keys() :
train_data[i] = np.zeros(len(train_data))<feature_engineering>
|
raw_train_df = pd.read_csv(train_path)
raw_test_df = pd.read_csv(test_path )
|
Digit Recognizer
|
2,188,530 |
for i in test_ingredients_count.keys() :
test_data[i] = np.zeros(len(test_data))<feature_engineering>
|
def parse_train_df(_train_df):
labels = _train_df.iloc[:,0].values
imgs = _train_df.iloc[:,1:].values
imgs_2d = np.array([[[[float(imgs[index][i*28 + j])/ 255] for j in range(28)] for i in range(28)] for index in range(len(imgs)) ])
processed_labels = [[0 for _ in range(10)] for i in range(len(labels)) ]
for i in range(len(labels)) :
processed_labels[i][labels[i]] = 1
return np.array(processed_labels), imgs_2d
def parse_test_df(test_df):
imgs = test_df.iloc[:, 0:].values
imgs_2d = np.array([[[[float(imgs[index][i * 28 + j])/ 255] for j in range(28)] for i in range(28)] for index in
range(len(imgs)) ])
return imgs_2d
|
Digit Recognizer
|
2,188,530 |
for i in range(len(train_data)) :
for j in train_data['ingredients'][i]:
train_data[j].iloc[i] = 1<feature_engineering>
|
y_train_set, x_train_set = parse_train_df(raw_train_df)
x_test = parse_test_df(raw_test_df)
x_train, x_val, y_train, y_val = train_test_split(x_train_set, y_train_set, test_size=0.20, random_state=42 )
|
Digit Recognizer
|
2,188,530 |
for i in range(len(test_data)) :
for j in test_data['ingredients'][i]:
test_data[j].iloc[i] = 1<drop_column>
|
print("Number of 1: {}".format(len(raw_train_df[raw_train_df['label'] == 1])))
print("Number of 5: {}".format(len(raw_train_df[raw_train_df['label'] == 5])) )
|
Digit Recognizer
|
2,188,530 |
test_data=test_data[train_data.drop('cuisine',axis=1 ).columns]<import_modules>
|
model = keras.Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), strides=(1, 1),
activation='relu',
input_shape=(size, size, 1)))
model.add(Conv2D(32,(3, 3), activation='relu', strides=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64,(3, 3), activation='relu'))
model.add(Conv2D(64,(3, 3), activation='relu', strides=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(128,(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=Adam(lr),
metrics=['accuracy'])
checkpoint = ModelCheckpoint('model_ckpt.{epoch:02d}.hdf5',
save_best_only=True,
save_weights_only=True)
lr_reducer = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=3,
mode='max', cooldown=3, verbose=1)
callback_list = [checkpoint, lr_reducer]
|
Digit Recognizer
|
2,188,530 |
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing<prepare_x_and_y>
|
training_history = model.fit(
x_train,
y_train,
epochs=epochs,
verbose=1,
validation_data=(x_val, y_val),
callbacks=callback_list
)
|
Digit Recognizer
|
2,188,530 |
X = train_data.drop(['id','cuisine','ingredients'],axis =1)
Y = train_data['cuisine']<split>
|
image_generator = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1
)
image_generator.fit(x_train )
|
Digit Recognizer
|
2,188,530 |
X_train,X_val,y_train,y_val = train_test_split(X,Y,random_state =42 )<train_model>
|
model_augmented = keras.Sequential()
model_augmented.add(Conv2D(32, kernel_size=(3, 3), strides=(1, 1),
activation='relu',
input_shape=(size, size, 1)))
model_augmented.add(Conv2D(32,(3, 3), activation='relu', strides=(2, 2)))
model_augmented.add(BatchNormalization())
model_augmented.add(Dropout(0.3))
model_augmented.add(Conv2D(64,(3, 3), activation='relu'))
model_augmented.add(Conv2D(64,(3, 3), activation='relu', strides=(2, 2)))
model_augmented.add(BatchNormalization())
model_augmented.add(Dropout(0.3))
model_augmented.add(Conv2D(128,(3, 3), activation='relu'))
model_augmented.add(BatchNormalization())
model_augmented.add(Flatten())
model_augmented.add(Dense(256, activation='relu'))
model_augmented.add(Dropout(0.25))
model_augmented.add(Dense(128, activation='relu'))
model_augmented.add(Dense(num_classes, activation='softmax'))
model_augmented.compile(loss=keras.losses.categorical_crossentropy,
optimizer=RMSprop(0.001),
metrics=['accuracy'])
aug_result = model_augmented.fit_generator(
image_generator.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs,
steps_per_epoch=len(x_train)// batch_size,
verbose=1,
validation_data=(x_val, y_val),
callbacks=callback_list
)
model_augmented.save('mnist_model.h5' )
|
Digit Recognizer
|
2,188,530 |
lr = LogisticRegression(solver='liblinear')
lr.fit(X_train,y_train )<compute_test_metric>
|
pred = model.predict(x_test)
pred_aug = model_augmented.predict(x_test )
|
Digit Recognizer
|
2,188,530 |
<predict_on_test><EOS>
|
def convert_prediction_result(model_result):
result = []
for i in range(len(model_result)) :
result += [np.argmax(model_result[i])]
return result
def write_submission(_submission_path, result_arr):
f_out = open(_submission_path, 'w')
f_out.write("ImageId,Label\n")
for i in range(len(result_arr)):
f_out.write("{},{}\n".format(i+1, result_arr[i]))
f_out.close()
write_submission('submission_base.csv', convert_prediction_result(pred))
write_submission('submission_aug.csv', convert_prediction_result(pred_aug))
|
Digit Recognizer
|
5,811,807 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<prepare_output>
|
np.random.seed(92)
|
Digit Recognizer
|
5,811,807 |
Submission=test_data[['id','cuisine']]
Submission.set_index('id',inplace=True )<save_to_csv>
|
train_data = '/kaggle/input/digit-recognizer/train.csv'
test_data = '/kaggle/input/digit-recognizer/test.csv'
|
Digit Recognizer
|
5,811,807 |
Submission.to_csv('Submission.csv' )<load_pretrained>
|
train_df = pd.read_csv(train_data)
print(train_df.shape)
train_df.head()
|
Digit Recognizer
|
5,811,807 |
archive_train=zipfile.ZipFile('/kaggle/input/whats-cooking/train.json.zip','r')
train_data=pd.read_json(archive_train.read('train.json'))
train_data.head()<load_pretrained>
|
test_df = pd.read_csv(test_data)
print(test_df.shape)
test_df.head()
|
Digit Recognizer
|
5,811,807 |
archive_test=zipfile.ZipFile('/kaggle/input/whats-cooking/test.json.zip','r')
test_data=pd.read_json(archive_test.read('test.json'))
test_data.head()<count_values>
|
if 'label' in train_df.columns:
y_train = train_df['label'].values.astype('int32')
train_df = train_df.drop('label', axis = 1)
else:
pass
x_train = train_df.values.astype('float32')
x_test = test_df.values.astype('float32' )
|
Digit Recognizer
|
5,811,807 |
train_data['cuisine'].value_counts()<count_missing_values>
|
train_max = np.max(x_train)
train_min = np.min(x_train)
test_max = np.max(x_test)
test_min = np.min(x_test)
|
Digit Recognizer
|
5,811,807 |
train_data.isna().sum()<count_missing_values>
|
x_train = x_train/255.0
x_test = x_test/255.0
|
Digit Recognizer
|
5,811,807 |
test_data.isna().sum()<define_variables>
|
norm_train_max = np.max(x_train)
norm_train_min = np.min(x_train)
norm_test_max = np.max(x_test)
norm_test_min = np.min(x_test )
|
Digit Recognizer
|
5,811,807 |
train_ingredients_count={}
for i in range(len(train_data)) :
for j in train_data['ingredients'][i]:
if j in train_ingredients_count.keys() :
train_ingredients_count[j]+=1
else:
train_ingredients_count[j]=1<define_variables>
|
y_train= to_categorical(y_train)
num_classes = y_train.shape[1]
num_classes
|
Digit Recognizer
|
5,811,807 |
test_ingredients_count={}
for i in range(len(test_data)) :
for j in test_data['ingredients'][i]:
if j in test_ingredients_count.keys() :
test_ingredients_count[j]+=1
else:
test_ingredients_count[j]=1<define_variables>
|
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16,(5,5),activation='relu', input_shape=(28,28,1)) ,
tf.keras.layers.Conv2D(16,(5,5), activation= 'relu'),
tf.keras.layers.Conv2D(16,(5,5), activation= 'relu'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(32,(3,3), activation= 'relu'),
tf.keras.layers.Conv2D(32,(3,3), activation= 'relu'),
tf.keras.layers.Conv2D(32,(3,3), activation = 'relu'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dropout(0.25),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.50),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(256, activation = 'relu'),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Dense(10, activation='softmax')
])
print('input shape :', model.input_shape)
print('output shape :', model.output_shape )
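# Assumption (the reshaping cell is not shown in this excerpt): the flat 784-pixel
# vectors loaded from the CSVs need a (28, 28, 1) shape before they can feed this
# Conv2D stack or ImageDataGenerator.flow.
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)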
|
Digit Recognizer
|
5,811,807 |
ingredients_missing_train=[]
for i in test_ingredients_count.keys() :
if i not in train_ingredients_count.keys() :
ingredients_missing_train.append(i)
print(len(ingredients_missing_train))<define_variables>
|
Digit Recognizer
|
|
5,811,807 |
for i in ingredients_missing_train:
train_ingredients_count[i]=0
print(len(train_ingredients_count))<define_variables>
|
Digit Recognizer
|
|
5,811,807 |
ingredients_missing=[]
for i in train_ingredients_count.keys() :
if i not in test_ingredients_count.keys() :
ingredients_missing.append(i)
print(len(ingredients_missing))<define_variables>
|
Digit Recognizer
|
|
5,811,807 |
for i in ingredients_missing:
test_ingredients_count[i]=0
print(len(test_ingredients_count))<feature_engineering>
|
model.compile(loss = 'categorical_crossentropy', optimizer= RMSprop(lr=0.003), metrics = ['acc'] )
|
Digit Recognizer
|
5,811,807 |
for i in train_ingredients_count.keys() :
train_data[i]=np.zeros(len(train_data))
for i in test_ingredients_count.keys() :
test_data[i]=np.zeros(len(test_data))<filter>
|
train_generator = image.ImageDataGenerator()
|
Digit Recognizer
|
5,811,807 |
for i in range(len(train_data)) :
for j in train_data['ingredients'][i]:
train_data[j].iloc[i]=1<filter>
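# Hedged sketch (alternative to the row-by-row loop above): scikit-learn's
# MultiLabelBinarizer builds the same 0/1 ingredient indicator matrix in a
# vectorized way; mlb.classes_ gives the resulting column order.
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
train_indicators = pd.DataFrame(mlb.fit_transform(train_data['ingredients']),
                                columns=mlb.classes_,
                                index=train_data.index)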
|
X = x_train
Y = y_train
X_train, X_val, Y_train , Y_val = train_test_split(x_train,y_train, test_size= 0.05, random_state = 92)
print(X_train.shape)
batches = train_generator.flow(X_train, Y_train, batch_size=32)
val_batches = train_generator.flow(X_val, Y_val, batch_size=32 )
|
Digit Recognizer
|
5,811,807 |
for i in range(len(test_data)) :
for j in test_data['ingredients'][i]:
test_data[j].iloc[i]=1<drop_column>
|
history = model.fit_generator(
    generator=batches,
    steps_per_epoch=batches.n // 32,       # one pass over the training split per epoch
    epochs=20,
    validation_data=val_batches,
    validation_steps=val_batches.n // 32,  # one pass over the validation split
)
|
Digit Recognizer
|
5,811,807 |
test_data=test_data[train_data.drop('cuisine',axis=1 ).columns]<prepare_x_and_y>
|
predictions = model.predict_classes(x_test, verbose=0)
|
Digit Recognizer
|
5,811,807 |
<split><EOS>
|
submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,
"Label": predictions})
submissions.to_csv("DR.csv", index=False, header=True )
|
Digit Recognizer
|
2,619,265 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<train_model>
|
%matplotlib inline
keras_version = keras.__version__
tf_version = K.tensorflow_backend.tf.VERSION
print("keras version:", keras_version)
print(K.backend() , "version:", tf_version )
|
Digit Recognizer
|
2,619,265 |
lr=LogisticRegression()
lr.fit(X_train,y_train)
lr.score(X_val,y_val )<predict_on_test>
|
rawdata = np.loadtxt('../input/train.csv', dtype=int, delimiter=',', skiprows=1)
|
Digit Recognizer
|
2,619,265 |
test_data['cuisine']=lr.predict(test_data.drop(['id','ingredients'],axis=1))<prepare_output>
|
y_oh = to_categorical(y, num_classes)
X_scaled = X / 127.5 - 1
X_scaled = np.expand_dims(X_scaled, -1)
num_val = int(y.shape[0] * 0.1)
validation_mask = np.zeros(y.shape[0], dtype=bool)
np.random.seed(1)
for c in range(num_classes):
idxs = np.random.choice(np.flatnonzero(y == c), num_val // 10, replace=False)
validation_mask[idxs] = 1
np.random.seed(None)
X_train = X_scaled[~validation_mask]
X_val = X_scaled[validation_mask]
print("Train/val pixel shapes:", X_train.shape, X_val.shape)
y_train = y_oh[~validation_mask]
y_val = y_oh[validation_mask]
print("Train/val label shapes:", y_train.shape, y_val.shape)
print("Validation Set Class Distribution:", np.bincount(y[validation_mask]))
|
Digit Recognizer
|
2,619,265 |
Submission=test_data[['id','cuisine']]
Submission.set_index('id',inplace=True )<save_to_csv>
|
def conv2D_bn_relu(x, filters, kernel_size, strides, padding='valid', kernel_initializer='glorot_uniform', name=None):
x = layers.Conv2D(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
name=name,
use_bias=False )(x)
x = layers.BatchNormalization(scale=False )(x)
return layers.Activation('relu' )(x)
def inception_module_A(x, filters=None, kernel_initializer='glorot_uniform'):
if filters is None:
filters = int(x.shape[-1])
branch_filters = filters // 4
b1 = conv2D_bn_relu(x,
filters=(branch_filters // 3)* 2,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
b1 = conv2D_bn_relu(b1,
filters=branch_filters,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b2 = conv2D_bn_relu(x,
filters=(branch_filters // 3)* 2,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
b2 = conv2D_bn_relu(b2,
filters=branch_filters,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b2 = conv2D_bn_relu(b2,
filters=branch_filters,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b3 = conv2D_bn_relu(x,
filters=branch_filters,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
pool = layers.AveragePooling2D(pool_size=(3, 3), strides=1, padding='same' )(x)
pool = conv2D_bn_relu(pool,
filters=branch_filters,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
return layers.concatenate([b1, b2, b3, pool])
def inception_module_C(x, filters=None, kernel_initializer='glorot_uniform'):
if filters is None:
filters = int(x.shape[-1])
branch_filters = filters // 6
b1 = conv2D_bn_relu(x,
filters=(branch_filters // 2)* 3,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
b1a = conv2D_bn_relu(b1,
filters=branch_filters,
kernel_size=(1, 3),
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b1b = conv2D_bn_relu(b1,
filters=branch_filters,
kernel_size=(3, 1),
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b2 = conv2D_bn_relu(x,
filters=(branch_filters // 2)* 3,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
b2 = conv2D_bn_relu(b2,
filters=(branch_filters // 4)* 7,
kernel_size=(1, 3),
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b2 = conv2D_bn_relu(b2,
filters=branch_filters * 2,
kernel_size=(3, 1),
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b2a = conv2D_bn_relu(b2,
filters=branch_filters,
kernel_size=(1, 3),
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b2b = conv2D_bn_relu(b2,
branch_filters,
kernel_size=(3, 1),
strides=1,
padding='same',
kernel_initializer=kernel_initializer)
b3 = conv2D_bn_relu(x,
filters=branch_filters,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
pool = layers.AveragePooling2D(pool_size=(3, 3), strides=1, padding='same' )(x)
pool = conv2D_bn_relu(pool,
filters=branch_filters,
kernel_size=1,
strides=1,
kernel_initializer=kernel_initializer)
return layers.concatenate([b1a, b1b, b2a, b2b, b3, pool] )
|
Digit Recognizer
|
2,619,265 |
Submission.to_csv('Submission.csv' )<import_modules>
|
K.clear_session()
stem_width = 64
inputs = layers.Input(shape=X_scaled.shape[1:])
x = conv2D_bn_relu(inputs,
filters=stem_width,
kernel_size=5,
strides=2,
padding='valid',
name='conv_1')
x = inception_module_A(x, filters=int(1.5*stem_width))
x = layers.SpatialDropout2D(0.2 )(x)
x = inception_module_A(x, filters=int(1.5*stem_width))
x = layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same' )(x)
x = layers.SpatialDropout2D(0.2 )(x)
x = inception_module_C(x, filters=int(2.25*stem_width))
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dropout(0.3 )(x)
x = layers.Dense(num_classes, name='logits' )(x)
x = layers.Activation('softmax', name='softmax' )(x)
model = Model(inputs=inputs, outputs=x)
model.summary()
|
Digit Recognizer
|
2,619,265 |
import xgboost as xgb
import numpy as np
import pandas as pd
import random
import optuna
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error<load_from_csv>
|
epsilon = 0.001
y_train_smooth = y_train *(1 - epsilon)+ epsilon / 10
print(y_train_smooth )
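# Hedged check (not from the original): each smoothed row still sums to 1 because
# the epsilon mass is spread evenly over the 10 classes.
import numpy as np
assert np.allclose(y_train_smooth.sum(axis=1), 1.0)
# In TF 2.x the same effect is available inside the loss itself, e.g.
# tf.keras.losses.CategoricalCrossentropy(label_smoothing=epsilon)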
|
Digit Recognizer
|
2,619,265 |
train = pd.read_csv(".. /input/tabular-playground-series-feb-2021/train.csv")
test = pd.read_csv(".. /input/tabular-playground-series-feb-2021/test.csv" )<categorify>
|
def elastic_transform(image, alpha_range, sigma, random_state=None):
if random_state is None:
random_state = np.random.RandomState(None)
if np.isscalar(alpha_range):
alpha = alpha_range
else:
alpha = np.random.uniform(low=alpha_range[0], high=alpha_range[1])
shape = image.shape
dx = gaussian_filter(( random_state.rand(*shape)* 2 - 1), sigma)* alpha
dy = gaussian_filter(( random_state.rand(*shape)* 2 - 1), sigma)* alpha
x, y, z = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2]), indexing='ij')
indices = np.reshape(x+dx,(-1, 1)) , np.reshape(y+dy,(-1, 1)) , np.reshape(z,(-1, 1))
return map_coordinates(image, indices, order=1, mode='reflect' ).reshape(shape )
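# Hedged usage sketch (not from the original): warping a single scaled digit;
# elastic_transform expects a 3-D array, which matches X_scaled's (28, 28, 1)
# samples. The alpha/sigma values here are illustrative only.
warped = elastic_transform(X_scaled[0], alpha_range=(8, 10), sigma=3)
print(warped.shape)  # (28, 28, 1)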
|
Digit Recognizer
|
2,619,265 |
df=train
for c in df.columns:
if df[c].dtype=='object':
lbl = LabelEncoder()
df[c]=df[c].fillna('N')
lbl.fit(list(df[c].values))
df[c] = lbl.transform(df[c].values)
train=df<categorify>
|
class CosineAnneal(keras.callbacks.Callback):
def __init__(self, max_lr, min_lr, T, T_mul=1, decay_rate=1.0):
self.max_lr = max_lr
self.min_lr = min_lr
self.decay_rate = decay_rate
self.T = T
self.T_cur = 0
self.T_mul = T_mul
self.step = 0
def on_batch_begin(self, batch, logs=None):
if self.T <= self.T_cur:
self.max_lr *= self.decay_rate
self.min_lr *= self.decay_rate
self.T *= self.T_mul
self.T_cur = 0
self.step = 0
lr = self.min_lr + 0.5 *(self.max_lr - self.min_lr)*(1 + np.cos(self.T_cur * np.pi / self.T))
K.set_value(self.model.optimizer.lr, lr)
self.step += 1
self.T_cur = self.step / self.params['steps']
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr )
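# Hedged usage sketch (not from the original): a warm-restart schedule with a
# 4-epoch first cycle, doubling the cycle length and decaying the peak LR at
# each restart. The values are illustrative only.
cosine_anneal = CosineAnneal(max_lr=3e-3, min_lr=1e-5, T=4, T_mul=2, decay_rate=0.8)
# model.fit(X_train, y_train, batch_size=64, epochs=20, callbacks=[cosine_anneal])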
|
Digit Recognizer
|