kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
---|---|---|---|
9,309,487 |
outputs = roberta_seq.predict(test_dataset)
y_pred = outputs[0].argmax(axis=1 )<compute_test_metric>
|
model.add(Conv2D(64,(3,3),padding='same',activation= 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),padding='same',activation= 'relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.4))
|
Digit Recognizer
|
9,309,487 |
print('Confusion matrix:')
print(confusion_matrix(y_test,y_pred,labels=[0,1]))
print()
print('Classification report:')
print(classification_report(y_test,y_pred,labels=[0,1],target_names=['not a disaster','disaster']))<define_variables>
|
model.add(Dense(256,activation= 'relu'))
model.add(Dropout(0.25))
model.add(Dense(128,activation= 'relu'))
model.add(Dropout(0.50))
model.add(Dense(10,activation= 'softmax'))
|
Digit Recognizer
|
9,309,487 |
tweets_test = list(df_test['text'])
tweets_test = process_tweets(tweets_test)
X_real_test = roberta_tokenizer(tweets_test,padding='max_length',max_length=max_len,return_tensors='tf')
real_test_dataset = tf.data.Dataset.from_tensor_slices(dict(X_real_test))
real_test_dataset = real_test_dataset.batch(batch_size)
real_test_dataset<predict_on_test>
|
optimizer = Adam(lr=0.004)
|
Digit Recognizer
|
9,309,487 |
outputs_test = roberta_seq.predict(real_test_dataset)
y_pred_test = outputs_test[0].argmax(axis=1 )<save_to_csv>
|
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
|
Digit Recognizer
|
9,309,487 |
results = pd.Series(y_pred_test,index=df_test.index,name='target')
results.to_csv('./submission.csv' )<import_modules>
|
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
Image("model.png")
|
Digit Recognizer
|
9,309,487 |
import pandas as pd
import numpy as np
import cv2
from glob import glob
import sklearn
from sklearn.model_selection import GroupKFold, StratifiedKFold
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
from sklearn.metrics import log_loss
from skimage import io
import os
from datetime import datetime
import time
import random
import torchvision
from torchvision import transforms
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.cuda.amp import autocast, GradScaler
import warnings
import joblib
from scipy.ndimage.interpolation import zoom<init_hyperparams>
|
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
|
Digit Recognizer
|
9,309,487 |
CFG = {
'fold_num': 12,
'seed': 719,
'model_arch': 'tf_efficientnet_b3_ns',
'img_size': 384,
'epochs': 120,
'train_bs': 28,
'valid_bs': 32,
'lr': 1e-2,
'num_workers': 5,
'accum_iter': 1,
'verbose_step': 2,
'device': 'cuda:0',
'tta': 10,
'used_epochs': [6,7,8,9],
'weights': [1,1,1,1]
}<set_options>
|
datagen = ImageDataGenerator(zoom_range = 0.2,
)
datagen.fit(x_train )
|
Digit Recognizer
|
9,309,487 |
def all_seed(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True<normalization>
|
hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=256),
steps_per_epoch=600,
epochs=15,
verbose=1,
validation_data=(x_test, y_test)
)
|
Digit Recognizer
|
9,309,487 |
def get_img(path):
im_bgr = cv2.imread(path)
im_rgb = im_bgr[:, :, ::-1]
return im_rgb<load_pretrained>
|
y_pred = model.predict(test, verbose = 1)
|
Digit Recognizer
|
9,309,487 |
img = get_img('../input/cassava-leaf-disease-classification/train_images/1000015157.jpg')
plt.imshow(img)
plt.show()<load_from_csv>
|
predictions=[]
for i in range(len(test)) :
a=np.where(y_pred[i] == max(y_pred[i]))
predictions.append(a[0][0] )
|
Digit Recognizer
|
9,309,487 |
<count_values><EOS>
|
counter = range(1, len(predictions)+ 1)
solution = pd.DataFrame({"ImageId": counter, "label": list(predictions)})
solution.to_csv("digit_recognizer8.csv", index = False )
|
Digit Recognizer
|
9,281,452 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model, load_model
from keras.layers import Conv2D, Input, MaxPooling2D, Dense, Dropout, Flatten
from keras.layers import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
9,281,452 |
sample_submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
sample_submission.head()<categorify>
|
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
X = train.drop(['label'],1 ).values
Y = train['label'].values
x_test = test.values
X = X/255.
x_test = x_test/255.
X = X.reshape(-1,28,28,1)
x_test = x_test.reshape(-1,28,28,1)
Y = to_categorical(Y)
|
Digit Recognizer
|
9,281,452 |
class CassavaDataset(Dataset):
def __init__(self, df, data_root, transforms = None, output_label = True):
super().__init__()
self.df = df.reset_index(drop = True ).copy()
self.transforms = transforms
self.data_root = data_root
self.output_label = output_label
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index: int):
path = "{}/{}".format(self.data_root, self.df.iloc[index]['image_id'])
img = get_img(path)
if self.transforms:
img = self.transforms(image = img)['image']
if self.output_label:
target = self.df.iloc[index]['label']
if self.output_label:
return img, target
else:
return img<import_modules>
|
x_train, x_valid, y_train, y_valid = train_test_split(X,Y, test_size=0.1 )
|
Digit Recognizer
|
9,281,452 |
from albumentations import (
HorizontalFlip, VerticalFlip, Transpose, ShiftScaleRotate,
HueSaturationValue,RandomResizedCrop, RandomBrightnessContrast,
Compose, Normalize, Cutout, CoarseDropout, CenterCrop, Resize
)
<categorify>
|
def get_model() :
In = Input(shape=(28,28,1))
x = Conv2D(32,(3,3), padding="same" )(In)
x = LeakyReLU(alpha=0.01 )(x)
x = Conv2D(32,(3,3), padding="same" )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = BatchNormalization()(x)
x = MaxPooling2D(( 2,2))(x)
x = Conv2D(64,(3,3), padding="same" )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = Conv2D(64,(3,3), padding="same" )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = BatchNormalization()(x)
x = MaxPooling2D(( 2,2))(x)
x = Dropout(0.2 )(x)
x = Conv2D(128,(3,3), padding="same" )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = Conv2D(128,(3,3), padding="same" )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = BatchNormalization()(x)
x = MaxPooling2D(( 2,2))(x)
x = Dropout(0.2 )(x)
x = Flatten()(x)
x = Dense(512 )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = BatchNormalization()(x)
x = Dropout(0.2 )(x)
x = Dense(256 )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = BatchNormalization()(x)
x = Dropout(0.2 )(x)
x = Dense(128 )(x)
x = LeakyReLU(alpha=0.01 )(x)
x = BatchNormalization()(x)
x = Dropout(0.2 )(x)
Out = Dense(10, activation="softmax" )(x)
model = Model(In, Out)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
return model
model = get_model()
model.summary()
|
Digit Recognizer
|
9,281,452 |
def get_train_transforms() :
return Compose([
RandomResizedCrop(CFG['img_size'], CFG['img_size']),
Transpose(p=0.5),
HorizontalFlip(p=0.5),
VerticalFlip(p=0.5),
ShiftScaleRotate(p=0.5),
HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
CoarseDropout(p=0.5),
Cutout(p=0.5),
ToTensorV2(p=1.0),
], p=1.)<categorify>
|
best_checkpoint = ModelCheckpoint('best.hdf5',monitor = 'val_loss', mode = "min", verbose = 1, save_best_only = True)
lr_reduction = ReduceLROnPlateau(monitor = 'val_loss', patience = 3, verbose = 1, factor = 0.5, min_lr = 1e-6 )
|
Digit Recognizer
|
9,281,452 |
def get_inference_transforms() :
return Compose([
RandomResizedCrop(CFG['img_size'], CFG['img_size']),
Transpose(p=0.5),
HorizontalFlip(p=0.5),
VerticalFlip(p=0.5),
HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
ToTensorV2(p=1.0),
], p=1.)<define_variables>
|
data_generator = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
epochs = 100
batch_size = 128
train_generator = data_generator.flow(x_train, y_train, batch_size=batch_size)
valid_generator = data_generator.flow(x_valid, y_valid, batch_size=batch_size )
|
Digit Recognizer
|
9,281,452 |
package_path = '../input/pytorch-image-models/pytorch-image-models-master'
sys.path.append(package_path)
<import_modules>
|
hist = model.fit_generator(train_generator, epochs=epochs, steps_per_epoch = x_train.shape[0]//batch_size,
validation_data = valid_generator, validation_steps = x_valid.shape[0]//batch_size, callbacks=[best_checkpoint, lr_reduction], verbose=1 )
|
Digit Recognizer
|
9,281,452 |
class CassavaImgClassifier(nn.Module):
def __init__(self, model_arch, n_class, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, n_class)
def forward(self, x):
x = self.model(x)
return x<define_variables>
|
best = load_model("best.hdf5")
preds = best.predict(x_test, verbose=1)
preds = np.array([np.argmax(i)for i in preds])
preds
|
Digit Recognizer
|
9,281,452 |
<import_modules><EOS>
|
submission['Label'] = preds
submission.to_csv("submission.csv", index=False)
submission.head()
|
Digit Recognizer
|
8,147,528 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
|
train = pd.read_csv(".. /input/digit-recognizer/train.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
8,147,528 |
if __name__ == '__main__':
all_seed(CFG['seed'])
folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values)
for fold,(trn_idx, val_idx)in enumerate(folds):
if fold > 0:
break
print('Inference fold {} started'.format(fold))
valid_ = train.loc[val_idx,:].reset_index(drop=True)
valid_ds = CassavaDataset(valid_, '../input/cassava-leaf-disease-classification/train_images/',
transforms = get_inference_transforms() , output_label=False)
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/',
transforms = get_inference_transforms() , output_label=False)
val_loader = torch.utils.data.DataLoader(
valid_ds,
batch_size = CFG['valid_bs'],
num_workers = CFG['num_workers'],
shuffle = False,
pin_memory = False,
)
tst_loader = torch.utils.data.DataLoader(
test_ds,
batch_size = CFG['valid_bs'],
num_workers = CFG['num_workers'],
shuffle = False,
pin_memory = False,
)
device = torch.device(CFG['device'])
model = EnsembleClassifier(CFG['model_arch'], train.label.nunique() ).to(device)
val_preds = []
tst_preds = []
for i, epoch in enumerate(CFG['used_epochs']):
model.load(torch.load('../input/fork-pytorch-efficientnet-baseline-train-amp-a/{}_fold_{}_{}'.format(CFG['model_arch'], fold, epoch)))
with torch.no_grad() :
for _ in range(CFG['tta']):
val_preds += [CFG['weights'][i]/sum(CFG['weights'])/CFG['tta']*inference_one_epoch(model, val_loader, device)]
tst_preds += [CFG['weights'][i]/sum(CFG['weights'])/CFG['tta']*inference_one_epoch(model, tst_loader, device)]
val_preds = np.mean(val_preds, axis=0)
tst_preds = np.mean(tst_preds, axis=0)
print('fold {} validation loss = {:.5f}'.format(fold, log_loss(valid_.label.values, val_preds)))
print('fold {} validation accuracy = {:.5f}'.format(fold,(valid_.label.values==np.argmax(val_preds, axis=1)).mean()))
del model
torch.cuda.empty_cache()<feature_engineering>
|
X_train = train.drop(labels=["label"], axis=1)
Y_train = train['label']
del train
X_train = X_train / 255.
test = test / 255.
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
|
Digit Recognizer
|
8,147,528 |
test['label'] = np.argmax(tst_preds, axis=1)
test.head()<save_to_csv>
|
X_train, X_val, Y_train, Y_val = train_test_split(
X_train,
Y_train,
test_size=0.1,
random_state=42
)
|
Digit Recognizer
|
8,147,528 |
test.to_csv('submission.csv', index = False )<define_variables>
|
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range=(1.15, 0.95),
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False,
shear_range=5
)
|
Digit Recognizer
|
8,147,528 |
package_paths = [
'../input/pytorch-image-models/pytorch-image-models-master',
'../input/adamp-optimizer/AdamP-master/adamp'
]
for pth in package_paths:
sys.path.append(pth )<import_modules>
|
init = RandomNormal(stddev=0.02)
model = Sequential([
Conv2D(32, 3, input_shape=(28, 28, 1), activation='relu', kernel_initializer=init),
BatchNormalization() ,
Conv2D(32, 3, activation='relu', kernel_initializer=init),
BatchNormalization() ,
Conv2D(32, 5, strides=2, padding='same', activation='relu', kernel_initializer=init),
BatchNormalization() ,
Dropout(0.4),
Conv2D(64, 3, activation='relu', kernel_initializer=init),
BatchNormalization() ,
Conv2D(64, 3, activation='relu', kernel_initializer=init),
BatchNormalization() ,
Conv2D(64, 5, strides=2, padding='same', activation='relu', kernel_initializer=init),
BatchNormalization() ,
Dropout(0.4),
Conv2D(128, 4, activation='relu', kernel_initializer=init),
BatchNormalization() ,
Flatten() ,
Dropout(0.4),
Dense(10, activation='softmax')
])
model.summary()
|
Digit Recognizer
|
8,147,528 |
from glob import glob
from sklearn.model_selection import GroupKFold, StratifiedKFold
import cv2
from skimage import io
import torch
from torch import nn
import os
from datetime import datetime
import time
import random
import cv2
import torchvision
from torchvision import transforms
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.cuda.amp import autocast, GradScaler
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
import timm
import sklearn
import warnings
import joblib
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
import warnings
import cv2
from scipy.ndimage.interpolation import zoom<set_options>
|
model.compile(
optimizer=Adam(lr=1e-3),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
|
Digit Recognizer
|
8,147,528 |
CFG = {
'valid': False,
'fold_num': 5,
'seed': 719,
'model_arch1': 'tf_efficientnet_b4_ns',
'model_arch2': 'tf_efficientnet_b4_ns',
'model_arch3' : 'regnety_040',
'model_arch4' : 'regnety_040',
'model_arch5': 'tf_efficientnet_b4_ns',
'model_arch6': 'regnety_040',
'ckpt_path2': 'regnety4noresetadamp',
'ckpt_path3': 'regnety4nocv',
'weight' : [1/6 for _ in range(6)],
'img_size1': 512,
'img_size2': 512,
'img_size3': 512,
'img_size4': 512,
'epochs': 10,
'tta_num' : 3,
'train_bs': 64,
'valid_bs': 64,
'T_0': 10,
'lr': 1e-4,
'min_lr': 1e-6,
'weight_decay':1e-6,
'num_workers': 4,
'accum_iter': 2,
'verbose_step': 1,
'device': 'cuda:0',
'used_epochs': [5, 6, 7, 8, 9]
}<load_from_csv>
|
learning_rate_reduction = ReduceLROnPlateau(
monitor='val_sparse_categorical_accuracy',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001
)
|
Digit Recognizer
|
8,147,528 |
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
train.head()<load_from_csv>
|
history = model.fit(
datagen.flow(X_train, Y_train, batch_size=64),
epochs=45,
validation_data=(X_val, Y_val),
callbacks=[learning_rate_reduction],
use_multiprocessing=True
)
|
Digit Recognizer
|
8,147,528 |
<set_options><EOS>
|
results = model.predict(test)
results = np.argmax(results, axis=1)
submission = pd.concat([
pd.Series(range(1,28001), name="ImageId"),
pd.Series(results, name="Label")
], axis=1)
submission.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
1,021,412 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<prepare_x_and_y>
|
%matplotlib inline
|
Digit Recognizer
|
1,021,412 |
def rand_bbox(size, lam):
W = size[0]
H = size[1]
cut_rat = np.sqrt(1.- lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
class CassavaDataset(Dataset):
def __init__(self, df, data_root,
transforms=None,
output_label=True,
):
super().__init__()
self.df = df.reset_index(drop=True ).copy()
self.transforms = transforms
self.data_root = data_root
self.output_label = output_label
if self.output_label == True:
self.labels = self.df['label'].values
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index: int):
if self.output_label:
target = self.labels[index]
img = get_img("{}/{}".format(self.data_root, self.df.loc[index]['image_id']))
if self.transforms:
img = self.transforms(image=img)['image']
if self.output_label == True:
return img, target
else:
return img<categorify>
|
train_df = pd.read_csv(".. /input/train.csv")
test_df = pd.read_csv(".. /input/test.csv")
|
Digit Recognizer
|
1,021,412 |
from albumentations import (
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize, Rotate,
ShiftScaleRotate, CenterCrop, Resize, Rotate, RandomShadow, RandomSizedBBoxSafeCrop,
ChannelShuffle, MotionBlur
)
def get_train_transforms() :
return Compose([
RandomResizedCrop(CFG['img_size1'], CFG['img_size1']),
Transpose(p=0.5),
HorizontalFlip(p=0.5),
VerticalFlip(p=0.5),
ShiftScaleRotate(p=0.5),
HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
CoarseDropout(p=0.5),
Cutout(p=0.5),
ToTensorV2(p=1.0),
], p=1.)
def get_valid_transforms() :
return Compose([
CenterCrop(CFG['img_size1'], CFG['img_size1'], p=1.) ,
Resize(CFG['img_size1'], CFG['img_size1']),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
ToTensorV2(p=1.0),
], p=1.)
def get_inference_transforms1() :
return Compose([
OneOf([
Resize(CFG['img_size1'], CFG['img_size1'], p=1.) ,
CenterCrop(CFG['img_size1'], CFG['img_size1'], p=1.) ,
RandomResizedCrop(CFG['img_size1'], CFG['img_size1'], p=1.)
], p=1.) ,
Transpose(p=0.5),
HorizontalFlip(p=0.5),
VerticalFlip(p=0.5),
Resize(CFG['img_size1'], CFG['img_size1']),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
ToTensorV2(p=1.0),
], p=1.)
def get_inference_transforms2() :
return Compose([
OneOf([
Resize(CFG['img_size2'], CFG['img_size2'], p=1.) ,
CenterCrop(CFG['img_size2'], CFG['img_size2'], p=1.) ,
RandomResizedCrop(CFG['img_size2'], CFG['img_size2'], p=1.)
], p=1.) ,
Transpose(p=0.5),
HorizontalFlip(p=0.5),
VerticalFlip(p=0.5),
Resize(CFG['img_size2'], CFG['img_size2']),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
ToTensorV2(p=1.0),
], p=1.)<choose_model_class>
|
train_data = train_df.values
test_data = test_df.values
|
Digit Recognizer
|
1,021,412 |
class CassvaImgClassifier(nn.Module):
def __init__(self, model_arch, n_class, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
if model_arch == 'regnety_040':
self.model.head = nn.Sequential(
nn.AdaptiveAvgPool2d(( 1,1)) ,
nn.Flatten() ,
nn.Linear(1088, n_class)
)
elif model_arch == 'regnety_320':
self.model.head = nn.Sequential(
nn.AdaptiveAvgPool2d(( 1,1)) ,
nn.Flatten() ,
nn.Linear(3712, n_class)
)
elif model_arch == 'regnety_080':
self.model.head = nn.Sequential(
nn.AdaptiveAvgPool2d(( 1,1)) ,
nn.Flatten() ,
nn.Linear(2016, n_class)
)
elif model_arch == 'regnety_160':
self.model.head = nn.Sequential(
nn.AdaptiveAvgPool2d(( 1,1)) ,
nn.Flatten() ,
nn.Linear(3024, n_class)
)
else:
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, n_class)
def forward(self, x):
x = self.model(x)
return x<choose_model_class>
|
labels = train_data[:,0]
train = train_data[:,1:]/255
|
Digit Recognizer
|
1,021,412 |
class CassvaImgClassifier_ViT(nn.Module):
def __init__(self, model_arch, n_class, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
self.model.head = nn.Linear(self.model.head.in_features, n_class)
for module in self.model.modules() :
if isinstance(module, nn.BatchNorm2d):
if hasattr(module, 'weight'):
module.weight.requires_grad_(False)
if hasattr(module, 'bias'):
module.bias.requires_grad_(False)
def forward(self, x):
x = self.model(x)
return x<create_dataframe>
|
dummy_y = keras.utils.to_categorical(labels)
x_train, x_test, y_train, y_test = train_test_split(train, dummy_y, test_size=0.1, random_state=166,stratify=labels )
|
Digit Recognizer
|
1,021,412 |
def prepare_dataloader(df, trn_idx, val_idx, data_root='../input/cassava-leaf-disease-classification/train_images/'):
train_ = df.loc[trn_idx,:].reset_index(drop=True)
valid_ = df.loc[val_idx,:].reset_index(drop=True)
train_ds = CassavaDataset(train_, data_root, transforms=get_train_transforms() , output_label=True)
valid_ds = CassavaDataset(valid_, data_root, transforms=get_valid_transforms() , output_label=True)
train_loader = torch.utils.data.DataLoader(
train_ds,
batch_size=CFG['train_bs'],
pin_memory=False,
drop_last=False,
shuffle=True,
num_workers=CFG['num_workers'],
)
val_loader = torch.utils.data.DataLoader(
valid_ds,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
return train_loader, val_loader
def train_one_epoch(epoch, model, loss_fn, optimizer, train_loader, device, scheduler=None, schd_batch_update=False):
model.train()
t = time.time()
running_loss = None
for step,(imgs, image_labels)in enumerate(train_loader):
imgs = imgs.to(device ).float()
image_labels = image_labels.to(device ).long()
with autocast() :
image_preds = model(imgs)
loss = loss_fn(image_preds, image_labels)
scaler.scale(loss ).backward()
if running_loss is None:
running_loss = loss.item()
else:
running_loss = running_loss *.99 + loss.item() *.01
if(( step + 1)% CFG['accum_iter'] == 0)or(( step + 1)== len(train_loader)) :
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if scheduler is not None and schd_batch_update:
scheduler.step()
if scheduler is not None and not schd_batch_update:
scheduler.step()
def valid_one_epoch(epoch, model, loss_fn, val_loader, device, scheduler=None, schd_loss_update=False):
model.eval()
t = time.time()
loss_sum = 0
sample_num = 0
image_preds_all = []
image_targets_all = []
for step,(imgs, image_labels)in enumerate(val_loader):
imgs = imgs.to(device ).float()
image_labels = image_labels.to(device ).long()
image_preds = model(imgs)
image_preds_all += [torch.argmax(image_preds, 1 ).detach().cpu().numpy() ]
image_targets_all += [image_labels.detach().cpu().numpy() ]
loss = loss_fn(image_preds, image_labels)
loss_sum += loss.item() *image_labels.shape[0]
sample_num += image_labels.shape[0]
image_preds_all = np.concatenate(image_preds_all)
image_targets_all = np.concatenate(image_targets_all)
print('epoch = {}'.format(epoch+1), 'validation multi-class accuracy = {:.4f}'.format(( image_preds_all==image_targets_all ).mean()))
if scheduler is not None:
if schd_loss_update:
scheduler.step(loss_sum/sample_num)
else:
scheduler.step()
def inference_one_epoch(model, data_loader, device):
model.eval()
image_preds_all = []
with torch.no_grad() :
for step,(imgs)in enumerate(data_loader):
imgs = imgs.to(device ).float()
image_preds = model(imgs)
image_preds_all += [torch.softmax(image_preds, 1 ).detach().cpu().numpy() ]
image_preds_all = np.concatenate(image_preds_all, axis=0)
return image_preds_all<train_model>
|
model = Sequential()
callbacks = [keras.callbacks.ModelCheckpoint('minist.h5', monitor='val_acc', verbose=1, save_best_only=True,
mode='auto')]
model.add(Conv2D(64, kernel_size=(3, 3),
activation='relu',padding='same',
input_shape=(28,28,1)))
model.add(Conv2D(64,(3, 3),padding='same', activation='relu'))
model.add(Conv2D(128,(3, 3),padding='same', activation='relu'))
model.add(Conv2D(128,(28, 28),activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
sgd = SGD(lr=0.01, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=128,
epochs=50,
verbose=1,
validation_data=(x_test, y_test),callbacks=callbacks )
|
Digit Recognizer
|
1,021,412 |
def freeze_batchnorm_stats(net):
try:
for m in net.modules() :
if isinstance(m,nn.BatchNorm2d)or isinstance(m,nn.LayerNorm):
m.eval()
except ValueError:
print('error with batchnorm2d or layernorm')
return
def unfreeze_batchnorm_stats(net):
try:
for m in net.modules() :
if isinstance(m,nn.BatchNorm2d)or isinstance(m,nn.LayerNorm):
m.train()
except ValueError:
print('error with batchnorm2d or layernorm')
return<normalization>
|
model.load_weights('minist.h5' )
|
Digit Recognizer
|
1,021,412 |
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, smoothing=0.1):
super(LabelSmoothingCrossEntropy, self ).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1.- smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()<feature_engineering>
|
predict = model.predict(test )
|
Digit Recognizer
|
1,021,412 |
if __name__ == '__main__':
seed_everything(CFG['seed'])
oof_preds = np.zeros(len(train))
print('Model 1 Start')
sub1 = []
folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values)
for fold,(trn_idx, val_idx)in enumerate(folds):
print('Inference fold {} started'.format(fold))
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds2 = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms2(), output_label=False)
tst_loader2 = torch.utils.data.DataLoader(
test_ds2,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model1 = CassvaImgClassifier(CFG['model_arch1'], train.label.nunique() ).to(device)
tst_preds = []
model1.load_state_dict(torch.load('../input/leaf-weight-v9-2/model9_2/swa_{}_fold_{}_{}'.format(CFG['model_arch1'], fold, '9')))
for tta in range(CFG['tta_num']):
tst_preds += [inference_one_epoch(model1, tst_loader2, device)]
sub1 += [np.mean(tst_preds, axis=0)]
del model1;
torch.cuda.empty_cache()
print('Model 2 Start')
sub2 = []
folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values)
for fold,(trn_idx, val_idx)in enumerate(folds):
print('Inference fold {} started'.format(fold))
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds2 = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms2(), output_label=False)
tst_loader2 = torch.utils.data.DataLoader(
test_ds2,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model2 = CassvaImgClassifier(CFG['model_arch2'], train.label.nunique() ).to(device)
tst_preds = []
model2.load_state_dict(torch.load('../input/905-training-efficientnetb4-merged-bs32/swa_{}_fold_{}_{}'.format(CFG['model_arch2'], fold, '9')))
for tta in range(CFG['tta_num']):
tst_preds += [inference_one_epoch(model2, tst_loader2, device)]
sub2 += [np.mean(tst_preds, axis=0)]
del model2;
torch.cuda.empty_cache()
print('Model 3 Start')
sub3 = []
folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values)
for fold,(trn_idx, val_idx)in enumerate(folds):
print('Inference fold {} started'.format(fold))
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds2 = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms2(), output_label=False)
tst_loader2 = torch.utils.data.DataLoader(
test_ds2,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model3 = CassvaImgClassifier(CFG['model_arch3'], train.label.nunique() ).to(device)
tst_preds = []
model3.load_state_dict(torch.load('../input/regnety4noresetadamp/swa_{}_fold_{}_{}'.format(CFG['model_arch3'], fold, '19')))
for tta in range(CFG['tta_num']):
tst_preds += [inference_one_epoch(model3, tst_loader2, device)]
sub3 += [np.mean(tst_preds, axis=0)]
del model3;
torch.cuda.empty_cache()
print('Model 4 Start')
sub4 = []
folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values)
for fold,(trn_idx, val_idx)in enumerate(folds):
print('Inference fold {} started'.format(fold))
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds2 = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms2(), output_label=False)
tst_loader2 = torch.utils.data.DataLoader(
test_ds2,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model4 = CassvaImgClassifier(CFG['model_arch4'], train.label.nunique() ).to(device)
tst_preds = []
model4.load_state_dict(torch.load('../input/0214v1-hwkim-regnet-40-reset-swalr-swastep-ep24/swa_{}_fold_{}_{}'.format(CFG['model_arch4'], fold, '23')))
for tta in range(CFG['tta_num']):
tst_preds += [inference_one_epoch(model4, tst_loader2, device)]
sub4 += [np.mean(tst_preds, axis=0)]
del model4;
torch.cuda.empty_cache()
print('Model 5 Start')
sub5 = []
folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values)
for fold,(trn_idx, val_idx)in enumerate(folds):
print('Inference fold {} started'.format(fold))
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds2 = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms2(), output_label=False)
tst_loader2 = torch.utils.data.DataLoader(
test_ds2,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model5 = CassvaImgClassifier(CFG['model_arch5'], train.label.nunique() ).to(device)
tst_preds = []
model5.load_state_dict(torch.load('../input/905-training-efficientnetb4-seed720/swa_{}_fold_{}_{}'.format(CFG['model_arch5'], fold, '9')))
for tta in range(CFG['tta_num']):
tst_preds += [inference_one_epoch(model5, tst_loader2, device)]
sub5 += [np.mean(tst_preds, axis=0)]
del model5;
torch.cuda.empty_cache()
print('Model 6 Start')
sub6 = []
folds = StratifiedKFold(n_splits=CFG['fold_num'] ).split(np.arange(train.shape[0]), train.label.values)
for fold,(trn_idx, val_idx)in enumerate(folds):
print('Inference fold {} started'.format(fold))
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds2 = CassavaDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_inference_transforms2(), output_label=False)
tst_loader2 = torch.utils.data.DataLoader(
test_ds2,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model6 = CassvaImgClassifier(CFG['model_arch6'], train.label.nunique() ).to(device)
tst_preds = []
model6.load_state_dict(torch.load('../input/reg-distill/swa_{}_fold_{}_{}'.format(CFG['model_arch6'], fold, '19')))
for tta in range(CFG['tta_num']):
tst_preds += [inference_one_epoch(model6, tst_loader2, device)]
sub6 += [np.mean(tst_preds, axis=0)]
del model6;
torch.cuda.empty_cache()
sub1 = [e * CFG['weight'][0] for e in sub1]
sub2 = [e * CFG['weight'][1] for e in sub2]
sub3 = [e * CFG['weight'][2] for e in sub3]
sub4 = [e * CFG['weight'][3] for e in sub4]
sub5 = [e * CFG['weight'][4] for e in sub5]
sub6 = [e * CFG['weight'][5] for e in sub6]
sub = [e1 + e2 + e3 + e4 + e5 + e6 for(e1, e2, e3, e4, e5, e6)in zip(sub1,sub2,sub3, sub4, sub5, sub6)]<feature_engineering>
|
results = np.argmax(predict,axis = 1 )
|
Digit Recognizer
|
1,021,412 |
test['label'] = np.argmax(np.mean(sub, axis=0), axis=1)
test.head()<save_to_csv>
|
submission = pd.DataFrame({"ImageId":range(1,28001),"Label":results})
submission.to_csv("cnn_mnist.csv",index=False )
|
Digit Recognizer
|
4,249,570 |
test.to_csv('submission.csv', index=False )<define_variables>
|
input_df = pd.read_csv(".. /input/train.csv")
test_df = pd.read_csv(".. /input/test.csv" )
|
Digit Recognizer
|
4,249,570 |
CONFIG_NAME = 'stacking12.yml'
debug = False
STAGE2_DIR = '../input/train-stacking-2dcnn-ver3/output'<define_variables>
|
input_data = input_df.drop(['label'], axis=1 ).values / 255.0
input_labels = input_df['label']
test_data = test_df.values / 255.0
train_data, valid_data, train_labels, valid_labels = train_test_split(input_data, input_labels, test_size = 0.15, random_state=2)
train_data = train_data.reshape(-1,28,28,1)
valid_data = valid_data.reshape(-1,28,28,1)
test_data = test_data.reshape(-1,28,28,1)
train_labels = to_categorical(train_labels, 10)
valid_labels = to_categorical(valid_labels, 10)
print("Train: ",train_data.shape, train_labels.shape)
print("Valid: ",valid_data.shape, valid_labels.shape)
print("Test: ",test_data.shape )
|
Digit Recognizer
|
4,249,570 |
CONFIG_PATH = f'{STAGE2_DIR}/{CONFIG_NAME}'
with open(CONFIG_PATH)as f:
config = yaml.load(f)
INFO = config['info']
TAG = config['tag']
CFG = config['cfg']
OUTPUT_DIR = './'
DATA_PATH = '../input/cassava-leaf-disease-classification'<define_variables>
|
data_augment = ImageDataGenerator(rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1 )
|
Digit Recognizer
|
4,249,570 |
<import_modules>
|
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32,(5,5), padding='same', activation='relu', input_shape=(28,28,1)) ,
tf.keras.layers.Conv2D(32,(5,5), padding='same', activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu'),
tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Dense(10, activation='softmax')
])
model.summary()
|
Digit Recognizer
|
4,249,570 |
sys.path.append('../input/pytorch-image-models/pytorch-image-models-master')
from albumentations import (
Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip,
RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout,
IAAAdditiveGaussianNoise, Transpose, CenterCrop
)
warnings.filterwarnings('ignore')
if CFG['debug']:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cuda')
start_time = datetime.datetime.now()
start_time_str = start_time.strftime('%m%d%H%M' )<load_from_csv>
|
annealer = ReduceLROnPlateau(
monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )
|
Digit Recognizer
|
4,249,570 |
train = pd.read_csv(f'{DATA_PATH}/train.csv')
test = pd.read_csv(f'{DATA_PATH}/sample_submission.csv')
label_map = pd.read_json(f'{DATA_PATH}/label_num_to_disease_map.json',
orient='index')
if CFG['debug']:
train = train.sample(n=1000, random_state=CFG['seed'] ).reset_index(drop=True )<define_variables>
|
optim = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optim, loss="categorical_crossentropy", metrics=['accuracy'])
num_epochs = 30
batch_size = 86
history = model.fit_generator(
data_augment.flow(train_data, train_labels, batch_size=batch_size),
steps_per_epoch=train_data.shape[0]//batch_size,
validation_data=(valid_data, valid_labels),
epochs=num_epochs,
callbacks=[annealer]
)
|
Digit Recognizer
|
4,249,570 |
model_dirs = []
for stage1 in CFG['stage1_models']:
num = str(stage1 ).rjust(2, '0')
output_dir_ = glob.glob(f'../input/{num}*/')
assert len(output_dir_)== 1, output_dir_
model_dirs.append(output_dir_[0])
model_dirs<load_pretrained>
|
predictions = model.predict(test_data)
pred_list = []
for index, pred in enumerate(predictions):
pred_list.append({"ImageId": index+1, "Label": np.argmax(pred)})
sub_df = pd.DataFrame(pred_list)
sub_df.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
3,916,180 |
normal_configs = []
tta_configs = []
normal_model_dirs = []
tta_model_dirs = []
for model_dir in model_dirs:
assert len(glob.glob(f'{model_dir}/*.yml')) ==1
config_path = glob.glob(f'{model_dir}/*.yml')[0]
with open(config_path)as f:
config = yaml.load(f)
if 'valid_augmentation' in config['tag'].keys() :
tta_model_dirs.append(model_dir)
tta_configs.append(config)
else:
normal_model_dirs.append(model_dir)
normal_configs.append(config )<compute_test_metric>
|
import numpy as np
import pandas as pd
import random
|
Digit Recognizer
|
3,916,180 |
def get_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def remove_glob(pathname, recursive=True):
for p in glob.glob(pathname, recursive=recursive):
if os.path.isfile(p):
os.remove(p)
@contextmanager
def timer(name):
t0 = time.time()
LOGGER.info(f'[{name}] start')
yield
LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s.')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
<define_variables>
|
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras import backend as K
|
Digit Recognizer
|
3,916,180 |
TRAIN_PATH = '../input/cassava-leaf-disease-classification/train_images'
TEST_PATH = '../input/cassava-leaf-disease-classification/test_images'<normalization>
|
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
3,916,180 |
class TestDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df['image_id'].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{TEST_PATH}/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented['image']
return image
class TTADataset(Dataset):
def __init__(self, df, image_path, ttas):
self.df = df
self.file_names = df['image_id'].values
self.labels = df['label'].values
self.image_path = image_path
self.ttas = ttas
def __len__(self)-> int:
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{self.image_path}/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imglist=[tta(image=image)['image'] for tta in self.ttas]
image=torch.stack(imglist)
label = torch.tensor(self.labels[idx] ).long()
return image, label<normalization>
|
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv")
|
Digit Recognizer
|
3,916,180 |
def _get_augmentations(aug_list, cfg):
process = []
for aug in aug_list:
if aug == 'Resize':
process.append(Resize(cfg['size'], cfg['size']))
elif aug == 'RandomResizedCrop':
process.append(RandomResizedCrop(cfg['size'], cfg['size']))
elif aug == 'CenterCrop':
process.append(CenterCrop(CFG['size'], CFG['size']))
elif aug == 'Transpose':
process.append(Transpose(p=0.5))
elif aug == 'HorizontalFlip':
process.append(HorizontalFlip(p=0.5))
elif aug == 'VerticalFlip':
process.append(VerticalFlip(p=0.5))
elif aug == 'ShiftScaleRotate':
process.append(ShiftScaleRotate(p=0.5))
elif aug == 'Normalize':
process.append(Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
))
else:
raise ValueError(f'{aug} is not suitable')
process.append(ToTensorV2())
return process
def get_transforms(*, aug_list, cfg):
return Compose(
_get_augmentations(aug_list, cfg)
)<choose_model_class>
|
y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1 )
|
Digit Recognizer
|
3,916,180 |
class CustomModel(nn.Module):
def __init__(self, model_name, target_size, pretrained=False):
super().__init__()
self.model = timm.create_model(model_name, pretrained=pretrained)
if hasattr(self.model, 'classifier'):
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, target_size)
elif hasattr(self.model, 'fc'):
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, target_size)
elif hasattr(self.model, 'head'):
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, target_size)
def forward(self, x):
x = self.model(x)
return x<categorify>
|
def prep_data(X_train, y_train, test):
X_train = X_train.astype('float32')/ 255
test = test.astype('float32')/255
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
y_train = keras.utils.np_utils.to_categorical(y_train)
classes = y_train.shape[1]
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size = 0.2, random_state = int(time.time()))
return X_train, y_train, X_test, y_test, classes, test
|
Digit Recognizer
|
3,916,180 |
def inference_tta(model, states, tta_loader, device):
model.to(device)
tk0 = tqdm(enumerate(tta_loader), total=len(tta_loader))
probs = []
for i,(images, _)in tk0:
images = images.to(device)
batch_size, n_crops, c, h, w = images.size()
images = images.view(-1, c, h, w)
avg_preds = []
for state in states:
model.load_state_dict(state['model'])
model.eval()
with torch.no_grad() :
y_preds = model(images ).softmax(1)
y_preds = y_preds.view(batch_size, n_crops,-1)
avg_preds.append(y_preds.to('cpu' ).numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
del images, _, y_preds, avg_preds
torch.cuda.empty_cache()
probs = np.concatenate(probs)
return probs<create_dataframe>
|
X_train, y_train, X_test, y_test, out_neurons, test = prep_data(X_train, y_train, test )
|
Digit Recognizer
|
3,916,180 |
def main_tta(config, model_dir):
INFO = config['info']
TAG = config['tag']
CFG = config['cfg']
CFG['train'] = False
CFG['inference'] = True
inference_batch_size = 8
seed_torch(seed=CFG['seed'])
model = CustomModel(TAG['model_name'], CFG['target_size'], pretrained=False)
states = [torch.load(path)for path in glob.glob(f'{model_dir}/*.pth')]
ttas = get_ttas(CFG)
tta_dataset = TTADataset(test, TEST_PATH, ttas=ttas)
tta_loader = DataLoader(tta_dataset, batch_size=inference_batch_size, shuffle=False,
num_workers=2, pin_memory=True)
predictions = inference_tta(model, states, tta_loader, device)
return predictions<categorify>
|
model = Sequential([
Conv2D(32, kernel_size =(3, 3), padding = 'same', activation = 'relu', input_shape =(28,28,1)) ,
Conv2D(32, kernel_size =(3, 3), activation = 'relu', padding = 'same'),
MaxPool2D(pool_size =(2, 2)) ,
Dropout(0.25),
Conv2D(64, kernel_size =(3, 3), activation = 'relu', padding = 'same'),
Conv2D(64, kernel_size =(3, 3), activation = 'relu', padding = 'same'),
MaxPool2D(pool_size =(2, 2)) ,
Dropout(0.25),
Conv2D(128, kernel_size =(3, 3), activation = 'relu', padding = 'same'),
Conv2D(128, kernel_size =(3, 3), activation = 'relu', padding = 'same'),
MaxPool2D(pool_size =(2, 2)) ,
Dropout(0.25),
Conv2D(128, kernel_size =(3, 3), activation = 'relu', padding = 'same'),
Conv2D(128, kernel_size =(3, 3), activation = 'relu', padding = 'same'),
MaxPool2D(pool_size =(2, 2)) ,
Dropout(0.25),
Flatten() ,
Dense(512, activation = 'relu'),
Dropout(0.5),
Dense(256, activation = 'relu'),
Dropout(0.5),
Dense(out_neurons, activation = 'softmax')
])
model.summary()
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss = 'categorical_crossentropy', optimizer = sgd , metrics = ['accuracy'] )
|
Digit Recognizer
|
3,916,180 |
data_num = len(test)
model_num = len(model_dirs)
target_num = CFG['target_size']
channel_num = 4
stage1_predictions = np.zeros(( model_num, data_num, channel_num, target_num), dtype=np.float)
for config, model_dir in zip(tta_configs, tta_model_dirs):
stage1_predictions[model_dirs.index(model_dir)] = main_tta(config, model_dir)
weights_opt_feats = stage1_predictions.mean(axis=2)
stage1_predictions = stage1_predictions.transpose(1, 2, 0, 3 )<categorify>
|
model.fit(X_train, y_train,
batch_size = 512,
epochs = 180,
validation_data =(X_test, y_test),
verbose = 0);
|
Digit Recognizer
|
3,916,180 |
class StackingDataset(Dataset):
def __init__(self, X: np.ndarray, y: Optional[np.ndarray] = None):
self.X = X
self.y = y
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
if self.y is None:
return torch.tensor(self.X[idx], dtype=torch.float)
else:
return(
torch.tensor(self.X[idx], dtype=torch.float),
torch.tensor(self.y[idx], dtype=torch.long),
)<choose_model_class>
|
result = model.evaluate(X_test, y_test, verbose = 0)
print('Accuracy: ', result[1])
print('Error: %.2f%%' %(100- result[1]*100))
y_pred = model.predict(test, verbose=0 )
|
Digit Recognizer
|
3,916,180 |
class CNNStacking(nn.Module):
def __init__(self, n_labels):
super(CNNStacking, self ).__init__()
self.sq = nn.Sequential(
nn.Conv2d(in_channels=4, out_channels=8, kernel_size=(3, 1), bias=False),
nn.ReLU() ,
nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 1), bias=False),
nn.ReLU() ,
nn.Flatten() ,
nn.Linear(in_features=16* n_labels, out_features=4 * n_labels),
nn.ReLU() ,
nn.Linear(in_features=4 * n_labels, out_features=n_labels),
)
def forward(self, x):
return self.sq(x )<predict_on_test>
|
solution = np.argmax(y_pred,axis = 1)
solution = pd.Series(solution, name="Label" ).astype(int)
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),solution],axis = 1)
submission.to_csv("mnist_with_cnn.csv",index=False )
|
Digit Recognizer
|
4,829,753 |
def inference(model, states, test_loader, device):
model.to(device)
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
for i,(features)in tk0:
features = features.to(device)
avg_preds = []
for state in states:
model.load_state_dict(state['model'])
model.eval()
with torch.no_grad() :
y_preds = model(features)
avg_preds.append(y_preds.softmax(1 ).to('cpu' ).numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs<save_to_csv>
|
np.random.seed(7)
%matplotlib inline
|
Digit Recognizer
|
4,829,753 |
model = CNNStacking(CFG['target_size'])
states = [torch.load(STAGE2_DIR+f'/fold{fold}_best.pth')for fold in CFG['trn_fold']]
test_dataset = StackingDataset(stage1_predictions)
test_loader = DataLoader(test_dataset, batch_size=CFG['batch_size'], shuffle=False,
num_workers=CFG['num_workers'], pin_memory=True)
pred_stacking = inference(model, states, test_loader, device)
<load_pretrained>
|
train_path = '../input/train.csv'
test_path = '../input/test.csv'
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)
train_df.info()
test_df.info()
|
Digit Recognizer
|
4,829,753 |
with open('../input/train-weights-optimization/best_weights.json', 'r') as f:
weights_dict = json.load(f)
weights_dict<define_variables>
|
pd.options.display.max_rows = 1000
print(train_df.isnull().sum() )
|
Digit Recognizer
|
4,829,753 |
pred_weights_opt = np.zeros(weights_opt_feats.shape[1:], dtype=np.float)
for idx, key in enumerate(model_dirs):
pred_weights_opt += weights_opt_feats[idx] * weights_dict[key[:-1]]<define_variables>
|
print(test_df.isnull().sum() )
|
Digit Recognizer
|
4,829,753 |
BLENDING_WEIGHTS = {
"stacking": 0.5,
"weights_opt": 0.5
}<prepare_output>
|
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
|
Digit Recognizer
|
4,829,753 |
predictions = pred_stacking * BLENDING_WEIGHTS['stacking'] + pred_weights_opt * BLENDING_WEIGHTS['weights_opt']
predictions<save_to_csv>
|
def inception_block(inputs):
tower_one = MaxPooling2D(( 3,3), strides=(1,1), padding='same' )(inputs)
tower_one = Conv2D(6,(1,1), activation='relu', padding='same')(tower_one)
tower_two = Conv2D(6,(1,1), activation='relu', padding='same')(inputs)
tower_two = Conv2D(6,(3,3), activation='relu', padding='same')(tower_two)
tower_three = Conv2D(6,(1,1), activation='relu', padding='same')(inputs)
tower_three = Conv2D(6,(5,5), activation='relu', padding='same')(tower_three)
x = concatenate([tower_one, tower_two, tower_three], axis=3)
return x
def inception_model(x_train):
inputs = Input(x_train.shape[1:])
x = inception_block(inputs)
x = Dropout(0.25 )(x)
x = Conv2D(32,(3, 3), padding='same' )(x)
x = Activation('relu' )(x)
x = BatchNormalization()(x)
x = Conv2D(32,(3, 3), padding='same' )(x)
x = Activation('relu' )(x)
x = BatchNormalization()(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25 )(x)
x = Flatten()(x)
x = Dense(64, activation='relu' )(x)
x = Dropout(0.5 )(x)
predictions = Dense(10, activation='softmax' )(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
|
Digit Recognizer
|
4,829,753 |
test['label'] = predictions.argmax(1)
test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False )<define_variables>
|
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )
|
Digit Recognizer
|
4,829,753 |
package_path = '../input/pytorch-image-models/pytorch-image-models-master'
sys.path.append(package_path)
DATA_DIR = '../input/cassava-leaf-disease-classification'
MODEL_DIR_0 = '../input/gpu-vit-noisearch-amp-aug-fold-0'
MODEL_DIR_1 = '../input/gpu-vit-noisearch-amp-aug-fold-1'
MODEL_DIR_2 = '../input/gpu-vit-noisearch-amp-aug-fold-2'
MODEL_DIR_3 = '../input/gpu-vit-noisearch-amp-aug-fold-3'
MODEL_DIR_4 = '../input/gpu-vit-noisearch-amp-aug-fold-4'
MODEL_DIR_01 = '../input/gpu-en-b4-ns-noisearch-amp-aug-fold-0'
MODEL_DIR_11 = '../input/gpu-en-b4-ns-noisearch-amp-aug-fold-1'
MODEL_DIR_21 = '../input/gpu-en-b4-ns-noisearch-amp-aug-fold-2'
MODEL_DIR_31 = '../input/gpu-en-b4-ns-noisearch-amp-aug-fold-3'
MODEL_DIR_41 = '../input/gpu-en-b4-ns-noisearch-amp-aug-fold-4'
MODEL_DIR_02 = '../input/gpu-seresnext-noisearch-amp-aug-fold-0'
MODEL_DIR_12 = '../input/gpu-seresnext-noisearch-amp-aug-fold-1'
MODEL_DIR_22 = '../input/gpu-seresnext-noisearch-amp-aug-fold-2'
MODEL_DIR_32 = '../input/gpu-seresnext-noisearch-amp-aug-fold-3'
MODEL_DIR_42 = '../input/gpu-seresnext-noisearch-amp-aug-fold-4'
MODEL_DIR_03 = '../input/gpu-en-b4-ns-amp-aug-fold-0'
MODEL_DIR_13 = '../input/gpu-en-b4-ns-amp-aug-fold-1'
MODEL_DIR_23 = '../input/gpu-en-b4-ns-amp-aug-fold-2'
MODEL_DIR_33 = '../input/gpu-en-b4-ns-amp-aug-fold-3'
MODEL_DIR_43 = '../input/gpu-en-b4-ns-amp-aug-fold-4'
<import_modules>
|
history = model.fit(X_train, Y_train,
batch_size=100,
epochs=100,
validation_split=0.1,
shuffle=True )
|
Digit Recognizer
|
4,829,753 |
from albumentations import (
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize
)
<init_hyperparams>
|
scores_train = model.evaluate(X_train, Y_train)
print("
%s: %.2f%%" %(model.metrics_names[1], scores_train[1]*100))
|
Digit Recognizer
|
4,829,753 |
CFG = {
'fold_num': 5,
'seed': 719,
'model_arch': 'vit_base_patch16_384',
'img_size': 384,
'epochs': 10,
'train_bs': 16,
'valid_bs': 16,
'lr': 1e-4,
'num_workers': 4,
'accum_iter': 1,
'verbose_step': 1,
'device': 'cuda:0',
'tta': 3,
'used_epochs': [7,8,9],
'weights': [1,1,1,1,1,1]
}<define_variables>
|
predictions = model.predict(X_test)
predictions = np.argmax(predictions, axis = 1)
predictions
|
Digit Recognizer
|
4,829,753 |
EPOCHS0 = {
0: [9,8,6,5],
1: [5,6,4,8],
2: [8,9,7,6],
3: [8,7,9,6],
4: [9,8,7,4]
}
EPOCHS1 = {
0: [8,9,7,6],
1: [9,4,8,6],
2: [9,7,8,4],
3: [5,8,9,3],
4: [6,7,8,9]
}
EPOCHS2 = {
0: [8,9,6,7],
1: [9,8,5,6],
2: [9,7,5,4],
3: [5,8,9,4],
4: [5,8,9,7]
}
EPOCHS3 = {
0: [8,9,7,6],
1: [2,7,9,5],
2: [5,6,9,7],
3: [8,9,7,6],
4: [4,5,8,7]
}
<load_from_csv>
|
result=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,"Label": predictions})
result.to_csv("mnist_cnn_only_v1.csv", index=False, header=True )
|
Digit Recognizer
|
1,970,319 |
train = pd.read_csv(f'{DATA_DIR}/train.csv' )<set_options>
|
warnings.filterwarnings('ignore' )
|
Digit Recognizer
|
1,970,319 |
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def get_img(path):
im_bgr = cv2.imread(path)
im_rgb = im_bgr[:, :, ::-1]
return im_rgb<categorify>
|
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
print(train_df.shape)
print(test_df.shape )
|
Digit Recognizer
|
1,970,319 |
class CassavaDataset(Dataset):
def __init__(self, df, data_root, transforms=None, output_label=True):
super().__init__()
self.df = df.reset_index(drop=True ).copy()
self.transforms = transforms
self.data_root = data_root
self.output_label = output_label
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index: int):
if self.output_label:
target = self.df.iloc[index]['label']
img = get_img(f"{self.data_root}/{self.df.loc[index]['image_id']}")
if self.transforms:
img = self.transforms(image=img)['image']
if self.output_label:
return img, target
else:
return img<choose_model_class>
|
print('Missing values in training dataset : %d' %np.sum(train_df.isnull().sum()))
print('Missing values in testing dataset : %d' %np.sum(test_df.isnull().sum()))
|
Digit Recognizer
|
1,970,319 |
class CassvaImgClassifierN(nn.Module):
def __init__(self, model_arch, n_class, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, n_class)
def forward(self, x):
x = self.model(x)
return x<choose_model_class>
|
train_df = train_df/255.0
test_df = test_df/255.0
train_df = train_df.values.reshape(-1,28,28,1)
test_df = test_df.values.reshape(-1,28,28,1 )
|
Digit Recognizer
|
1,970,319 |
class CassvaImgClassifier(nn.Module):
def __init__(self, model_arch, n_class, pretrained=False):
super().__init__()
self.n_class = n_class
self.model = timm.create_model(model_arch, pretrained=pretrained)
if 'vit' in model_arch:
n_features = self.model.head.in_features
self.model.head = nn.Identity()
if 'eff' in model_arch:
n_features = self.model.classifier.in_features
self.model.classifier = nn.Identity()
if 'resnext' in model_arch:
n_features = self.model.fc.in_features
self.model.fc = nn.Identity()
self.layer_noise = nn.Linear(n_features, n_class)
self.layer_pure = nn.Linear(n_features, n_class)
def forward(self, x):
x = self.model(x)
x1 = self.layer_noise(x)
x2 = self.layer_pure(x)
x1 = x1 + x2
return x1, x2<find_best_model_class>
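A hedged forward-pass sketch of the two-head wrapper: the first output adds the "noise" head to the "pure" head (presumably the training-time target on noisy labels), while the inference loop below keeps only the second, pure output. CFG and the train dataframe come from the cells above; pretrained=False, so no weights are downloaded, and a CUDA device is assumed as configured.
device = torch.device(CFG['device'])
model = CassvaImgClassifier(CFG['model_arch'], train.label.nunique()).to(device)
dummy = torch.randn(2, 3, CFG['img_size'], CFG['img_size']).to(device) # fake batch of 2 images
with torch.no_grad():
    noisy_logits, pure_logits = model(dummy)
print(noisy_logits.shape, pure_logits.shape) # both (batch, n_class)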
|
train_y = to_categorical(train_label, num_classes = 10)
print('one hot encoding vector of %d :' %train_label[3] , train_y[3] )
|
Digit Recognizer
|
1,970,319 |
def inference_one_epoch(model, data_loader, device):
model.eval()
image_preds_all = []
pbar = tqdm(enumerate(data_loader), total=len(data_loader))
for step,(imgs)in pbar:
imgs = imgs.to(device ).float()
_, image_preds = model(imgs)
image_preds_all += [torch.sigmoid(image_preds ).detach().cpu().numpy() ]
image_preds_all = np.concatenate(image_preds_all, axis=0)
return image_preds_all<create_dataframe>
|
x_train,x_val,y_train,y_val = train_test_split(train_df,train_y,test_size = 0.2, random_state = 2)
|
Digit Recognizer
|
1,970,319 |
def run_inference(fold, MODEL_DIR, model_arch, epoch_list):
print(f'Inference fold {fold} started')
test = pd.DataFrame()
test['image_id'] = list(os.listdir(f'{DATA_DIR}/test_images/'))
if 'vit' in model_arch:
T = get_inference_transforms_384()
else:
T = get_inference_transforms()
test_ds = CassavaDataset(
test, f'{DATA_DIR}/test_images/',
transforms=T, output_label=False)
tst_loader = torch.utils.data.DataLoader(
test_ds,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model = CassvaImgClassifier(model_arch, train.label.nunique() ).to(device)
tst_preds = []
for i, epoch in enumerate(epoch_list[fold][:1]):
model.load_state_dict(torch.load(f'{MODEL_DIR}/{model_arch}_fold_{fold}_{epoch}'))
with torch.no_grad() :
for _ in range(CFG['tta']):
tst_image_preds = inference_one_epoch(model, tst_loader, device)
tst_preds += [(1/CFG['tta'])*tst_image_preds]
tst_preds = np.sum(tst_preds, axis=0)
del model
torch.cuda.empty_cache()
return tst_preds<create_dataframe>
|
dataGenerator = ImageDataGenerator(rotation_range = 10,
width_shift_range = 0.1,
height_shift_range = 0.1,
zoom_range = 0.1)
dataGenerator.fit(x_train )
|
Digit Recognizer
|
1,970,319 |
def run_inferenceN(fold, MODEL_DIR, model_arch):
print(f'Inference fold {fold} started')
test = pd.DataFrame()
test['image_id'] = list(os.listdir(f'{DATA_DIR}/test_images/'))
test_ds = CassavaDataset(
test, f'{DATA_DIR}/test_images/',
transforms=get_inference_transforms() , output_label=False)
tst_loader = torch.utils.data.DataLoader(
test_ds,
batch_size=CFG['valid_bs'],
num_workers=CFG['num_workers'],
shuffle=False,
pin_memory=False,
)
device = torch.device(CFG['device'])
model = CassvaImgClassifierN(model_arch, train.label.nunique() ).to(device)
tst_preds = []
for i, epoch in enumerate(CFG['used_epochs']):
model.load_state_dict(
torch.load(f'{MODEL_DIR}/{model_arch}_fold_{fold}_{epoch}'))
with torch.no_grad() :
for _ in range(CFG['tta']):
tst_image_preds = inference_one_epochN(model, tst_loader, device)
tst_preds += [(1/ CFG['tta'])*tst_image_preds]
tst_preds = np.mean(tst_preds, axis=0)
del model
torch.cuda.empty_cache()
return tst_preds<define_variables>
|
ExampleImg = train_df[10][:,:,0]
def translate(img,x,y):
transMat = np.float32([[1,0,x],[0,1,y]])
shifted = cv2.warpAffine(img,transMat,(img.shape[1],img.shape[0]))
return shifted
def rotate(img,angle):
(h,w)= img.shape[:2]
center =(w/2,h/2)
rotatMat = cv2.getRotationMatrix2D(center,angle,1)
rotated = cv2.warpAffine(img,rotatMat,(w,h))
return rotated
def zoom(img,scale):
(oh,ow)= img.shape[:2]
zoomed = cv2.resize(img, None, fx = scale, fy = scale)
(nh,nw)= zoomed.shape[:2]
extend1 = int(( oh - nh)/ 2.0 + 0.5)
extend2 = oh -(nh+extend1)
zoomed = cv2.copyMakeBorder(zoomed,extend1,extend2,extend1,extend2,cv2.BORDER_CONSTANT,0)
return zoomed
shifted = translate(ExampleImg,0,3)
rotated = rotate(ExampleImg,10)
zoomed = zoom(ExampleImg,0.8)
plt.figure(figsize =(10,20))
plt.subplot(1,4,1)
plt.imshow(ExampleImg,cmap = plt.cm.binary)
plt.title('Original')
plt.subplot(1,4,2)
plt.imshow(rotated,cmap = plt.cm.binary)
plt.title('rotated: 10 deg')
plt.subplot(1,4,3)
plt.imshow(shifted,cmap = plt.cm.binary)
plt.title('shifted: y-3')
plt.subplot(1,4,4)
plt.imshow(zoomed,cmap = plt.cm.binary)
plt.title('zoomed: 0.8 scale')
plt.show()
|
Digit Recognizer
|
1,970,319 |
preds0 = run_inference(0, MODEL_DIR_0, 'vit_base_patch16_384', EPOCHS0)
preds1 = run_inference(1, MODEL_DIR_1, 'vit_base_patch16_384', EPOCHS0)
preds2 = run_inference(2, MODEL_DIR_2, 'vit_base_patch16_384', EPOCHS0)
preds3 = run_inference(3, MODEL_DIR_3, 'vit_base_patch16_384', EPOCHS0)
preds4 = run_inference(4, MODEL_DIR_4, 'vit_base_patch16_384', EPOCHS0)
PRED0 =(preds0 + preds1 + preds2 + preds3 + preds4)/5<define_variables>
|
def build_CNN(input_shape, output_units = 10):
input_layer = keras.layers.Input(input_shape, name = "input_layer")
x = Conv2D(filters = 64, kernel_size =(3,3), padding = 'same' )(input_layer)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = Conv2D(filters = 64, kernel_size =(3,3), padding = 'same' )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = MaxPooling2D(pool_size=(2,2))(x)
x = Dropout(0.25 )(x)
x = Conv2D(filters = 32, kernel_size =(3,3), padding = 'same' )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = Conv2D(filters = 32, kernel_size =(3,3), padding = 'same' )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = MaxPooling2D(pool_size=(2,2))(x)
x = Dropout(0.25 )(x)
x = Flatten()(x)
x = Dense(units = 512 )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = Dense(units = 256 )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = Dropout(0.25 )(x)
x = Dense(units = 128 )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = Dense(units = 64, activation = 'relu' )(x)
output_layer = Dense(units = output_units, activation = 'softmax', name = "output_layer" )(x)
model = keras.models.Model(inputs = [input_layer], outputs = [output_layer])
return model
|
Digit Recognizer
|
1,970,319 |
preds0 = run_inference(0, MODEL_DIR_01, 'tf_efficientnet_b4_ns', EPOCHS1)
preds1 = run_inference(1, MODEL_DIR_11, 'tf_efficientnet_b4_ns', EPOCHS1)
preds2 = run_inference(2, MODEL_DIR_21, 'tf_efficientnet_b4_ns', EPOCHS1)
preds3 = run_inference(3, MODEL_DIR_31, 'tf_efficientnet_b4_ns', EPOCHS1)
preds4 = run_inference(4, MODEL_DIR_41, 'tf_efficientnet_b4_ns', EPOCHS1)
PRED1 =(preds0 + preds1 + preds2 + preds3 + preds4)/5<define_variables>
|
batch_size = 128
epochs = 300
momentum = 0.95
lr = 5e-4
ES = keras.callbacks.EarlyStopping(monitor = "val_loss",
                                   patience = 10,
                                   verbose = 1)
model = build_CNN(x_train.shape[1:], 10)
optimizer = keras.optimizers.SGD(lr = lr, momentum = momentum)
model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics = ['accuracy'])
model.summary()
history = model.fit_generator(dataGenerator.flow(x_train, y_train, batch_size = batch_size),
                              epochs = epochs, verbose = 2, validation_data = (x_val, y_val),
                              steps_per_epoch = x_train.shape[0] // batch_size,
                              callbacks = [ES])
|
Digit Recognizer
|
1,970,319 |
preds0 = run_inference(0, MODEL_DIR_02, 'seresnext50_32x4d', EPOCHS2)
preds1 = run_inference(1, MODEL_DIR_12, 'seresnext50_32x4d', EPOCHS2)
preds2 = run_inference(2, MODEL_DIR_22, 'seresnext50_32x4d', EPOCHS2)
preds3 = run_inference(3, MODEL_DIR_32, 'seresnext50_32x4d', EPOCHS2)
preds4 = run_inference(4, MODEL_DIR_42, 'seresnext50_32x4d', EPOCHS2)
PRED2 =(preds0 + preds1 + preds2 + preds3 + preds4)/5<define_variables>
|
final_prediction = model.predict(test_df)
final_prediction = np.argmax(final_prediction, axis = 1 )
|
Digit Recognizer
|
1,970,319 |
preds0 = run_inferenceN(0, MODEL_DIR_03, 'tf_efficientnet_b4_ns',)
preds1 = run_inferenceN(1, MODEL_DIR_13, 'tf_efficientnet_b4_ns',)
preds2 = run_inferenceN(2, MODEL_DIR_23, 'tf_efficientnet_b4_ns',)
preds3 = run_inferenceN(3, MODEL_DIR_33, 'tf_efficientnet_b4_ns',)
preds4 = run_inferenceN(4, MODEL_DIR_43, 'tf_efficientnet_b4_ns')
PRED3 =(preds0 + preds1 + preds2 + preds3 + preds4)/5<define_variables>
|
submission = pd.DataFrame({'ImageId':np.arange(1,final_prediction.shape[0]+1,1),'Label':final_prediction})
submission.to_csv('submission_v5.csv',index = False )
|
Digit Recognizer
|
4,442,168 |
tst_preds =(PRED0 + 2*PRED1 + PRED2 + PRED3)/5<save_to_csv>
|
%matplotlib inline
seed = 4098653265
seed_all(seed, det_cudnn=True)
def mnist_learner(data: ImageDataBunch, model_name: Optional[str] = None)-> Learner:
cbs =(( partial(SaveModelCallback, monitor='accuracy', name=model_name),)
if model_name else None)
lrn = cnn_learner(data, models.resnet152, metrics=accuracy,
opt_func=partial(optim.SGD, momentum=0.9),
callback_fns=cbs)
seed = 224391980
seed_all(seed, det_cudnn=True)
return lrn
|
Digit Recognizer
|
4,442,168 |
test = pd.DataFrame()
test['image_id'] = list(os.listdir(f'{DATA_DIR}/test_images/'))
test['label'] = np.argmax(tst_preds, axis=1)
test.to_csv('submission.csv', index=False )<define_variables>
|
data = ImageDataBunch.from_csv('data', test='test', num_workers=0)
data.test_ds.x.items = np.array(sorted(data.test_ds.x.items, key=attrgetter('stem')))
learner = mnist_learner(data, 'best-freezed')
learner.lr_find()
learner.recorder.plot(suggestion=True )
|
Digit Recognizer
|
4,442,168 |
CONFIG_NAME = 'stacking12.yml'
debug = False
STAGE2_DIR = '../input/train-stacking-2dcnn-ver3/output'
|
learner.fit_one_cycle(30, 1.3e-2 )
|
Digit Recognizer
|
4,442,168 |
CONFIG_PATH = f'{STAGE2_DIR}/{CONFIG_NAME}'
with open(CONFIG_PATH) as f:
    config = yaml.load(f, Loader=yaml.SafeLoader)
INFO = config['info']
TAG = config['tag']
CFG = config['cfg']
OUTPUT_DIR = './'
DATA_PATH = '../input/cassava-leaf-disease-classification'
|
learner = mnist_learner(data ).load('best-freezed')
corr_args = DatasetFormatter.from_most_unsure(learner, 100)
corr = PredictionsCorrector(*corr_args )
|
Digit Recognizer
|
4,442,168 |
<import_modules>
|
corr.corrections = {
275: 5, 927: 6, 3080: 0, 3162: 3, 3485: 8, 3700: 5, 3740: 9,
4680: 1, 5215: 1, 5276: 1, 5928: 5, 6789: 3, 7026: 4, 9040: 2,
9202: 7, 9744: 3, 9814: 4, 9924: 2, 10344: 9, 11370: 8, 11746: 8,
11862: 8, 12620: 8, 12864: 9, 14579: 9, 14742: 0, 16204: 9,
16281: 5, 16452: 4, 17589: 8, 17931: 5, 18166: 1, 19351: 1,
19896: 8, 20052: 9, 20509: 5, 20665: 1, 20946: 3, 21388: 1,
22253: 8, 22465: 4, 22527: 1, 22632: 8, 22823: 4, 23874: 1,
24015: 4, 24155: 4, 24662: 8, 25527: 6, 25915: 8, 26913: 3,
27372: 3, 27402: 2
}
corr.show_corrections(9, figsize=(14.5, 11))
|
Digit Recognizer
|
4,442,168 |
sys.path.append('../input/pytorch-image-models/pytorch-image-models-master')
import timm
from albumentations import (
    Compose, OneOf, Normalize, Resize, RandomResizedCrop, RandomCrop, HorizontalFlip, VerticalFlip,
    RandomBrightness, RandomContrast, RandomBrightnessContrast, Rotate, ShiftScaleRotate, Cutout,
    IAAAdditiveGaussianNoise, Transpose, CenterCrop
)
from albumentations.pytorch import ToTensorV2
warnings.filterwarnings('ignore')
if CFG['debug']:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cuda')
start_time = datetime.datetime.now()
start_time_str = start_time.strftime('%m%d%H%M' )<load_from_csv>
|
preds = corr.corrected_labels()
submission = pd.DataFrame(
preds, columns=('Label',),
index=pd.Index(( 1 + int(p.stem)for p in data.test_ds.x.items),
name='ImageId')
).sort_index()
submission.to_csv('submission.csv')
submission.head(10 )
|
Digit Recognizer
|
4,442,168 |
train = pd.read_csv(f'{DATA_PATH}/train.csv')
test = pd.read_csv(f'{DATA_PATH}/sample_submission.csv')
label_map = pd.read_json(f'{DATA_PATH}/label_num_to_disease_map.json',
orient='index')
if CFG['debug']:
train = train.sample(n=1000, random_state=CFG['seed'] ).reset_index(drop=True )<define_variables>
|
! echo 64e2b22ef2bf4e7f8b179c497a81aeea11761ce1e242083a44c42a20c8a52c65 \
submission.csv | sha256sum -c
|
Digit Recognizer
|
7,390,641 |
model_dirs = []
for stage1 in CFG['stage1_models']:
num = str(stage1 ).rjust(2, '0')
    output_dir_ = glob.glob(f'../input/{num}*/')
assert len(output_dir_)== 1, output_dir_
model_dirs.append(output_dir_[0])
model_dirs<load_pretrained>
|
sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv")
|
Digit Recognizer
|
7,390,641 |
normal_configs = []
tta_configs = []
normal_model_dirs = []
tta_model_dirs = []
for model_dir in model_dirs:
assert len(glob.glob(f'{model_dir}/*.yml')) ==1
config_path = glob.glob(f'{model_dir}/*.yml')[0]
    with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
if 'valid_augmentation' in config['tag'].keys() :
tta_model_dirs.append(model_dir)
tta_configs.append(config)
else:
normal_model_dirs.append(model_dir)
normal_configs.append(config )<compute_test_metric>
|
X_train = train / 255.0
X_test = test / 255.0
|
Digit Recognizer
|
7,390,641 |
def get_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def remove_glob(pathname, recursive=True):
for p in glob.glob(pathname, recursive=recursive):
if os.path.isfile(p):
os.remove(p)
@contextmanager
def timer(name):
t0 = time.time()
LOGGER.info(f'[{name}] start')
yield
LOGGER.info(f'[{name}] done in {time.time() - t0:.0f} s.')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
<define_variables>
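LOGGER is referenced inside timer() but defined in an omitted cell, so this sketch creates a minimal stand-in purely for illustration and uses seed_torch's default seed.
import logging
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__) # hypothetical stand-in for the omitted LOGGER
seed_torch(seed=42)
with timer('stage-1 inference'):
    pass # placeholder for the actual inference work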
|
X_train = X_train.iloc[:,1:].values.astype('float32')
y_train = train['label'].astype('float32')
X_test = X_test.values.astype('float32')
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
|
Digit Recognizer
|
7,390,641 |
TRAIN_PATH = '../input/cassava-leaf-disease-classification/train_images'
TEST_PATH = '../input/cassava-leaf-disease-classification/test_images'
|
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
def standardize(x):
return(x - mean_px)/std_px
[mean_px, std_px]
|
Digit Recognizer
|
7,390,641 |
class TestDataset(Dataset):
def __init__(self, df, transform=None):
self.df = df
self.file_names = df['image_id'].values
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{TEST_PATH}/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
augmented = self.transform(image=image)
image = augmented['image']
return image
class TTADataset(Dataset):
def __init__(self, df, image_path, ttas):
self.df = df
self.file_names = df['image_id'].values
self.labels = df['label'].values
self.image_path = image_path
self.ttas = ttas
def __len__(self)-> int:
return len(self.df)
def __getitem__(self, idx):
file_name = self.file_names[idx]
file_path = f'{self.image_path}/{file_name}'
image = cv2.imread(file_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imglist=[tta(image=image)['image'] for tta in self.ttas]
image=torch.stack(imglist)
label = torch.tensor(self.labels[idx] ).long()
return image, label<normalization>
|
print(y_train)
y_train= to_categorical(y_train)
num_classes = y_train.shape[1]
|
Digit Recognizer
|
7,390,641 |
def _get_augmentations(aug_list, cfg):
process = []
for aug in aug_list:
if aug == 'Resize':
process.append(Resize(cfg['size'], cfg['size']))
elif aug == 'RandomResizedCrop':
process.append(RandomResizedCrop(cfg['size'], cfg['size']))
elif aug == 'CenterCrop':
            process.append(CenterCrop(cfg['size'], cfg['size']))
elif aug == 'Transpose':
process.append(Transpose(p=0.5))
elif aug == 'HorizontalFlip':
process.append(HorizontalFlip(p=0.5))
elif aug == 'VerticalFlip':
process.append(VerticalFlip(p=0.5))
elif aug == 'ShiftScaleRotate':
process.append(ShiftScaleRotate(p=0.5))
elif aug == 'Normalize':
process.append(Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
))
else:
raise ValueError(f'{aug} is not suitable')
process.append(ToTensorV2())
return process
def get_transforms(*, aug_list, cfg):
return Compose(
_get_augmentations(aug_list, cfg)
)<choose_model_class>
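get_ttas() used further below is defined in an omitted cell; the snippet here is only one plausible way such a list of TTA pipelines could be built from get_transforms(), assuming the loaded CFG carries a 'size' entry.
tta_aug_lists = [
    ['Resize', 'Normalize'],
    ['Resize', 'HorizontalFlip', 'Normalize'],
    ['Resize', 'VerticalFlip', 'Normalize'],
]
ttas = [get_transforms(aug_list=augs, cfg=CFG) for augs in tta_aug_lists]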
|
seed = 43
np.random.seed(seed)
seed
|
Digit Recognizer
|
7,390,641 |
class CustomModel(nn.Module):
def __init__(self, model_name, target_size, pretrained=False):
super().__init__()
self.model = timm.create_model(model_name, pretrained=pretrained)
if hasattr(self.model, 'classifier'):
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, target_size)
elif hasattr(self.model, 'fc'):
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, target_size)
elif hasattr(self.model, 'head'):
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, target_size)
def forward(self, x):
x = self.model(x)
return x<categorify>
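A small sketch of restoring one trained checkpoint into this wrapper. It assumes the *.pth files store a dict with a 'model' key, which is what the inference loops below expect, and that TAG/CFG come from the loaded yml config.
model = CustomModel(TAG['model_name'], CFG['target_size'], pretrained=False)
state = torch.load(glob.glob(f'{model_dirs[0]}/*.pth')[0], map_location='cpu')
model.load_state_dict(state['model'])
model.eval()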
|
gen = image.ImageDataGenerator()
X = X_train
y = y_train
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.10, random_state=42)
batches = gen.flow(X_train, y_train, batch_size=64)
val_batches=gen.flow(X_val, y_val, batch_size=64)
|
Digit Recognizer
|
7,390,641 |
def inference_tta(model, states, tta_loader, device):
model.to(device)
tk0 = tqdm(enumerate(tta_loader), total=len(tta_loader))
probs = []
for i,(images, _)in tk0:
images = images.to(device)
batch_size, n_crops, c, h, w = images.size()
images = images.view(-1, c, h, w)
avg_preds = []
for state in states:
model.load_state_dict(state['model'])
model.eval()
with torch.no_grad() :
y_preds = model(images ).softmax(1)
y_preds = y_preds.view(batch_size, n_crops,-1)
avg_preds.append(y_preds.to('cpu' ).numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
del images, _, y_preds, avg_preds
torch.cuda.empty_cache()
probs = np.concatenate(probs)
return probs
<create_dataframe>
|
gen =ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
height_shift_range=0.08, zoom_range=0.08)
batches = gen.flow(X_train, y_train, batch_size=64)
val_batches = gen.flow(X_val, y_val, batch_size=64)
|
Digit Recognizer
|
7,390,641 |
def main_tta(config, model_dir):
INFO = config['info']
TAG = config['tag']
CFG = config['cfg']
CFG['train'] = False
CFG['inference'] = True
inference_batch_size = 8
seed_torch(seed=CFG['seed'])
model = CustomModel(TAG['model_name'], CFG['target_size'], pretrained=False)
states = [torch.load(path)for path in glob.glob(f'{model_dir}/*.pth')]
ttas = get_ttas(CFG)
tta_dataset = TTADataset(test, TEST_PATH, ttas=ttas)
tta_loader = DataLoader(tta_dataset, batch_size=inference_batch_size, shuffle=False,
num_workers=2, pin_memory=True)
predictions = inference_tta(model, states, tta_loader, device)
return predictions<define_variables>
|
def get_bn_model() :
model = Sequential([
Lambda(standardize, input_shape=(28,28,1)) ,
Conv2D(32,(3,3), activation='relu'),
BatchNormalization() ,
Conv2D(32,(3,3), activation='relu'),
MaxPooling2D() ,
BatchNormalization() ,
Conv2D(64,(3,3), activation='relu'),
BatchNormalization() ,
Conv2D(64,(3,3), activation='relu'),
MaxPooling2D() ,
Flatten() ,
BatchNormalization() ,
Dense(512, activation='relu'),
BatchNormalization() ,
Dense(10, activation='softmax')
])
model.compile(Adam() , loss='categorical_crossentropy', metrics=['accuracy'])
return model
model= get_bn_model()
model.optimizer.learning_rate=0.01
history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=1,
validation_data=val_batches, validation_steps=val_batches.n)
|
Digit Recognizer
|
7,390,641 |
data_num = len(test)
model_num = len(model_dirs)
target_num = CFG['target_size']
channel_num = 4
stage1_predictions = np.zeros((model_num, data_num, channel_num, target_num), dtype=float)
for config, model_dir in zip(tta_configs, tta_model_dirs):
stage1_predictions[model_dirs.index(model_dir)] = main_tta(config, model_dir)
stage1_predictions = stage1_predictions.transpose(1, 2, 0, 3 )<categorify>
|
model.optimizer.learning_rate=0.01
gen = image.ImageDataGenerator()
batches = gen.flow(X, y, batch_size=64)
history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=3 )
|
Digit Recognizer
|
7,390,641 |
<choose_model_class><EOS>
|
predictions = model.predict_classes(X_test, verbose=0)
submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,
"Label": predictions})
submissions.to_csv("DR.csv", index=False, header=True)
|
Digit Recognizer
|
3,729,811 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<predict_on_test>
|
batch_size=128
keras.__version__
|
Digit Recognizer
|
3,729,811 |
def inference(model, states, test_loader, device):
model.to(device)
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
for i,(features)in tk0:
features = features.to(device)
avg_preds = []
for state in states:
model.load_state_dict(state['model'])
model.eval()
with torch.no_grad() :
y_preds = model(features)
avg_preds.append(y_preds.softmax(1 ).to('cpu' ).numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs<save_to_csv>
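A hedged final step, assuming probs is the (n_samples, n_classes) array returned by inference() over the stacked stage-1 features and test is the sample-submission frame loaded earlier.
test['label'] = probs.argmax(axis=1)
test[['image_id', 'label']].to_csv('submission.csv', index=False)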
|
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
pred_df = pd.read_csv('../input/sample_submission.csv')
|
Digit Recognizer
|