kernel_id: int64, range 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completetion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
13,324,252
MAX_SEQUENCE_LENGTH = 100 EMBEDDING_DIM = 300<data_type_conversions>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
13,324,252
corpus = [] feats = ['question1','question2'] for f in feats: train[f] = train[f].astype(str) test[f] = test[f].astype(str) corpus+=train[f].values.tolist() tokenizer = Tokenizer() tokenizer.fit_on_texts(corpus) X_q1 = tokenizer.texts_to_sequences(train['question1']) X_q2 = tokenizer.texts_to_sequences(train['question2']) X_test_q1 = tokenizer.texts_to_sequences(test['question1']) X_test_q2 = tokenizer.texts_to_sequences(test['question2']) X_q1 = pad_sequences(X_q1, maxlen=MAX_SEQUENCE_LENGTH) X_q2 = pad_sequences(X_q2, maxlen=MAX_SEQUENCE_LENGTH) X_test_q1 = pad_sequences(X_test_q1, maxlen=MAX_SEQUENCE_LENGTH) X_test_q2 = pad_sequences(X_test_q2, maxlen=MAX_SEQUENCE_LENGTH) y = train['is_duplicate'].values word_index = tokenizer.word_index nb_words = len(word_index)+1<feature_engineering>
model = load_model("./mod_best.hdf5" )
Digit Recognizer
13,324,252
def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32') def load_embeddings(path): with open(path)as f: return dict(get_coefs(*line.strip().split(' ')) for line in tqdm(f)) def build_matrix(word_index, path): embedding_index = load_embeddings(path) embedding_matrix = np.zeros(( len(word_index)+ 1, 300)) unknown_words = [] for word, i in word_index.items() : try: embedding_matrix[i] = embedding_index[word] except KeyError: unknown_words.append(word) return embedding_matrix, unknown_words glove_path = '../input/glove840b300dtxt/glove.840B.300d.txt' embedding_matrix,unknown_words = build_matrix(word_index,glove_path )<split>
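The `callbacks` list passed to `fit_generator` in the next cell is never defined in the dumped cells. A minimal sketch of a plausible definition, assuming the `./mod_best.hdf5` checkpoint loaded earlier came from a `ModelCheckpoint`; the monitored metric and the extra learning-rate callback are assumptions:

from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

# Assumed reconstruction: save the best weights to the path that
# load_model() reads above, and lower the learning rate on plateaus.
callbacks = [
    ModelCheckpoint("./mod_best.hdf5", monitor="val_loss", save_best_only=True),
    ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=3, verbose=1),
]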
batch_size = 128 epochs = 30 history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size = batch_size), epochs = epochs, validation_data = datagen.flow(X_val,Y_val, batch_size = batch_size), steps_per_epoch = X_train.shape[0] // batch_size, callbacks = callbacks )
Digit Recognizer
13,324,252
X_train_q1,X_val_q1,X_train_q2,X_val_q2,y_train,y_val = train_test_split(X_q1,X_q2,y,train_size=0.8,random_state=1024) print(X_train_q1.shape,X_val_q1.shape) X_train = [X_train_q1,X_train_q2] X_val = [X_val_q1,X_val_q2] X_test = [X_test_q1,X_test_q2]<categorify>
score = model.evaluate(X_val, Y_val, verbose=0) print("Val_Loss :",score[0]) print("Val_Acc :",score[1] )
Digit Recognizer
13,324,252
input_q1 = Input(shape =(X_train[0].shape[1],)) input_q2 = Input(shape =(X_train[0].shape[1],)) embedding_layer = Embedding(nb_words, EMBEDDING_DIM, input_length = X_train[0].shape[1], weights = [embedding_matrix], trainable=False) embedded_sequences_q1 = embedding_layer(input_q1) embedded_sequences_q2 = embedding_layer(input_q2) bilstm_layer = Bidirectional(LSTM(64, return_sequences=False)) x1 = bilstm_layer(embedded_sequences_q1) x2 = bilstm_layer(embedded_sequences_q2) distance = layers.Concatenate()([x1, x2]) x = Dense(64, activation="relu" )(distance) preds = Dense(1, activation="sigmoid" )(x) model = Model(inputs=[input_q1, input_q2], outputs=preds) model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) model.summary() utils.plot_model(model, show_shapes=True, expand_nested=True )<train_model>
probabilities = model.predict(test) y_pred = np.argmax(probabilities, axis=1) print(y_pred.shape )
Digit Recognizer
13,324,252
<save_to_csv><EOS>
submission = pd.DataFrame({'ImageId': np.arange(1, 28001), 'Label': y_pred}) submission.to_csv("submission.csv", index = False) print("Your submission was successfully saved!" )
Digit Recognizer
14,235,927
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
print(tf.__version__)
Digit Recognizer
14,235,927
le = LabelEncoder() pd.set_option('display.max_rows', 400) pd.set_option('display.max_columns', 160) pd.set_option('display.max_colwidth', 40) warnings.filterwarnings("ignore" )<load_from_csv>
train = pd.read_csv("../input/digit-recognizer/train.csv") test = pd.read_csv("../input/digit-recognizer/test.csv" )
Digit Recognizer
14,235,927
test = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv') test.head()<load_from_csv>
cut=0 nets = 15 epochs = 45
Digit Recognizer
14,235,927
categories = pd.read_csv('../input/predict-future-sales-eng-translation/categories.csv') pd.DataFrame(categories.category_name.values.reshape(-1, 4))<categorify>
if cut > 0: n_train=round(cut*train.shape[0]) train = train[:n_train] Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) X_train = X_train / 255.0 X_test = test / 255.0 X_train = X_train.values.reshape(-1,28,28,1) X_test = X_test.values.reshape(-1,28,28,1) Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
14,235,927
categories['group_name'] = categories['category_name'].str.extract(r'(^[\w\s]*)') categories['group_name'] = categories['group_name'].str.strip() categories['group_id'] = le.fit_transform(categories.group_name.values) categories.sample(5 )<load_from_csv>
datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.10, width_shift_range=0.15, height_shift_range=0.15 )
Digit Recognizer
14,235,927
items = pd.read_csv('../input/predict-future-sales-eng-translation/items.csv') items['item_name'] = items['item_name'].str.lower() items['item_name'] = items['item_name'].str.replace('.', '') for i in [r'[^\w\d\s\.]', r'\bthe\b', r'\bin\b', r'\bis\b', r'\bfor\b', r'\bof\b', r'\bon\b', r'\band\b', r'\bto\b', r'\bwith\b' , r'\byo\b']: items['item_name'] = items['item_name'].str.replace(i, ' ') items['item_name'] = items['item_name'].str.replace(r'\b.\b', ' ') items['item_name_no_space'] = items['item_name'].str.replace(' ', '') items['item_name_first4'] = [x[:4] for x in items['item_name_no_space']] items['item_name_first6'] = [x[:6] for x in items['item_name_no_space']] items['item_name_first11'] = [x[:11] for x in items['item_name_no_space']] del items['item_name_no_space'] items.item_name_first4 = le.fit_transform(items.item_name_first4.values) items.item_name_first6 = le.fit_transform(items.item_name_first6.values) items.item_name_first11 = le.fit_transform(items.item_name_first11.values) items = items.join(categories.set_index('category_id'), on='category_id') items.sample(10 )<remove_duplicates>
DEVICE = 'TPU' if DEVICE == 'TPU': print('connecting to TPU...') try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: print('Could not connect to TPU') tpu = None if tpu: try: print('Initializing TPU...') tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) distribution_strategy = tf.distribute.experimental.TPUStrategy(tpu) print('TPU initialized') except Exception: print('Failed to initialize TPU!') else: DEVICE = 'GPU' if DEVICE != 'TPU': print('Using default strategy for CPU and single GPU') distribution_strategy = tf.distribute.get_strategy() if DEVICE == 'GPU': print('Num GPUs Available: ', len(tf.config.experimental.list_physical_devices('GPU'))) print('REPLICAS: ', distribution_strategy.num_replicas_in_sync)
Digit Recognizer
14,235,927
dupes = items[(items.duplicated(subset=['item_name','category_id'],keep=False)) ] dupes['in_test'] = dupes.item_id.isin(test.item_id.unique()) dupes = dupes.groupby('item_name' ).agg({'item_id':['first','last'],'in_test':['first','last']}) dupes = dupes[(dupes[('in_test', 'first')]==False)|(dupes[('in_test', 'last')]==False)] temp = dupes[dupes[('in_test', 'first')]==True] keep_first = dict(zip(temp[('item_id', 'last')], temp[('item_id', 'first')])) temp = dupes[dupes[('in_test', 'first')]==False] keep_second = dict(zip(temp[('item_id', 'first')], temp[('item_id', 'last')])) item_map = {**keep_first, **keep_second}<feature_engineering>
with distribution_strategy.scope() : model = [0] *nets for j in range(nets): model[j] = Sequential() model[j].add(Conv2D(32, kernel_size = 3, activation='relu', input_shape =(28, 28, 1))) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(64, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 3, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu')) model[j].add(BatchNormalization()) model[j].add(Dropout(0.4)) model[j].add(Conv2D(128, kernel_size = 4, activation='relu')) model[j].add(BatchNormalization()) model[j].add(Flatten()) model[j].add(Dropout(0.4)) model[j].add(Dense(10, activation='softmax')) model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
14,235,927
sales = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv') sales =(sales .query('0 < item_price < 50000 and 0 < item_cnt_day < 1001') .replace({ 'shop_id':{0:57, 1:58, 11:10}, 'item_id':item_map }) ) sales = sales[sales['shop_id'].isin(test.shop_id.unique())] sales['date'] = pd.to_datetime(sales.date,format='%d.%m.%Y') sales['weekday'] = sales.date.dt.dayofweek sales['first_sale_day'] = sales.date.dt.dayofyear sales['first_sale_day'] += 365 *(sales.date.dt.year-2013) sales['first_sale_day'] = sales.groupby('item_id')['first_sale_day'].transform('min' ).astype('int16') sales['revenue'] = sales['item_cnt_day']*sales['item_price']<feature_engineering>
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) history = [0] * nets for j in range(nets): X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size = 0.1) history[j] = model[j].fit_generator(datagen.flow(X_train2,Y_train2, batch_size=64), epochs = epochs, steps_per_epoch = X_train2.shape[0]//64, validation_data =(X_val2,Y_val2), callbacks=[annealer], verbose=0) print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format( j+1,epochs,max(history[j].history['accuracy']),max(history[j].history['val_accuracy'])) )
Digit Recognizer
14,235,927
temp = sales.groupby(['shop_id','weekday'] ).agg({'item_cnt_day':'sum'} ).reset_index() temp = pd.merge(temp, sales.groupby(['shop_id'] ).agg({'item_cnt_day':'sum'} ).reset_index() , on='shop_id', how='left') temp.columns = ['shop_id','weekday', 'shop_day_sales', 'shop_total_sales'] temp['day_quality'] = temp['shop_day_sales']/temp['shop_total_sales'] temp = temp[['shop_id','weekday','day_quality']] dates = pd.DataFrame(data={'date':pd.date_range(start='2013-01-01',end='2015-11-30')}) dates['weekday'] = dates.date.dt.dayofweek dates['month'] = dates.date.dt.month dates['year'] = dates.date.dt.year - 2013 dates['date_block_num'] = dates['year']*12 + dates['month'] - 1 dates['first_day_of_month'] = dates.date.dt.dayofyear dates['first_day_of_month'] += 365 * dates['year'] dates = dates.join(temp.set_index('weekday'), on='weekday') dates = dates.groupby(['date_block_num','shop_id','month','year'] ).agg({'day_quality':'sum','first_day_of_month':'min'} ).reset_index() dates.query('shop_id == 28' ).head(15 )<groupby>
results = np.zeros(( X_test.shape[0],10)) for j in range(nets): results = results + model[j].predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("submission.csv",index=False )
Digit Recognizer
14,238,959
sales =(sales .groupby(['date_block_num', 'shop_id', 'item_id']) .agg({ 'item_cnt_day':'sum', 'revenue':'sum', 'first_sale_day':'first' }) .reset_index() .rename(columns={'item_cnt_day':'item_cnt'}) ) sales.sample(5 )<drop_column>
from __future__ import print_function, division import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler from torchvision import models, transforms import matplotlib.pyplot as plt import time import copy import pandas as pd from sklearn.model_selection import train_test_split from torch.utils.data import DataLoader, Dataset
Digit Recognizer
14,238,959
test['date_block_num'] = 34 del test['ID']<concatenate>
class MyDataset(Dataset): def __init__(self, feature, target=None, transform=None): self.X = feature self.Y = target self.transform = transform def __len__(self): return len(self.X) def __getitem__(self, idx): if self.transform is not None: return self.transform(self.X[idx]), self.Y[idx] elif self.Y is None: return [self.X[idx]] return self.X[idx], self.Y[idx]
Digit Recognizer
14,238,959
df = pd.concat([df,test] ).fillna(0) df = df.reset_index() del df['index']<merge>
train_data_dir = "../input/digit-recognizer/train.csv" test_data_dir = "../input/digit-recognizer/test.csv" train = pd.read_csv(train_data_dir) test = pd.read_csv(test_data_dir) raw_train_labels = train["label"] raw_train_imgs = train.drop(labels = ["label"], axis = 1) normalized_train = raw_train_imgs/255.0 normalized_test = test/255.0 train_split, validation_split, train_labels_split, validation_labels_split = train_test_split(normalized_train, raw_train_labels, random_state=0) test_data = torch.from_numpy(normalized_test.values.reshape(( -1,1,28,28))) train_data = torch.from_numpy(train_split.values.reshape(( -1,1,28,28))) train_labels_data = torch.from_numpy(train_labels_split.values) validation_data = torch.from_numpy(validation_split.values.reshape(( -1,1,28,28))) validation_labels_data = torch.from_numpy(validation_labels_split.values) train_set = MyDataset(train_data.float() , train_labels_data) valid_set = MyDataset(validation_data.float() , validation_labels_data) test_set = MyDataset(test_data.float()) batch_size = 128 train_loader = torch.utils.data.DataLoader(train_set, batch_size = batch_size, shuffle = True) valid_loader = torch.utils.data.DataLoader(valid_set, batch_size = batch_size, shuffle = False) test_loader = torch.utils.data.DataLoader(test_set, batch_size = batch_size, shuffle = False )
Digit Recognizer
14,238,959
df = pd.merge(df, sales, on=['shop_id', 'item_id', 'date_block_num'], how='left' ).fillna(0) df = pd.merge(df, dates, on=['date_block_num','shop_id'], how='left') df = pd.merge(df, items.drop(columns=['item_name','group_name','category_name']), on='item_id', how='left' )<feature_engineering>
data = next(iter(train_loader)) mean = data[0].mean() std = data[0].std() transform = transforms.Compose([ transforms.ToPILImage() , transforms.RandomAffine(degrees = 30), transforms.RandomPerspective() , transforms.ToTensor() , transforms.Normalize(mean, std)]) augmented_train_set = MyDataset(train_data.float() , train_labels_data, transform=transform) datasets = [] datasets.append(train_set) datasets.append(augmented_train_set) train_set = torch.utils.data.ConcatDataset(datasets) train_loader = torch.utils.data.DataLoader(train_set, batch_size = batch_size, shuffle = True )
Digit Recognizer
14,238,959
shops = pd.read_csv('../input/predict-future-sales-eng-translation/shops.csv') shops_cats = pd.DataFrame( np.array(list(product(*[df['shop_id'].unique() , df['category_id'].unique() ]))), columns =['shop_id', 'category_id'] ) temp = df.groupby(['category_id', 'shop_id'] ).agg({'item_cnt':'sum'} ).reset_index() temp2 = temp.groupby('shop_id' ).agg({'item_cnt':'sum'} ).rename(columns={'item_cnt':'shop_total'}) temp = temp.join(temp2, on='shop_id') temp['category_proportion'] = temp['item_cnt']/temp['shop_total'] temp = temp[['shop_id', 'category_id', 'category_proportion']] shops_cats = pd.merge(shops_cats, temp, on=['shop_id','category_id'], how='left') shops_cats = shops_cats.fillna(0) shops_cats = shops_cats.pivot(index='shop_id', columns=['category_id']) kmeans = KMeans(n_clusters=7, random_state=0 ).fit(shops_cats) shops_cats['shop_cluster'] = kmeans.labels_.astype('int8') shops = shops.join(shops_cats['shop_cluster'], on='shop_id' )<feature_engineering>
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") dataloaders = {'train': train_loader, 'val': valid_loader} dataset_sizes= {'train': len(train_set), 'val': len(valid_set)} def train_model(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, num_epochs=25): since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): for phase in ['train', 'val']: if phase == 'train': model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) if phase == 'train': loss.backward() optimizer.step() running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) if phase == 'train': scheduler.step() epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) model.load_state_dict(best_model_wts) return model
Digit Recognizer
14,238,959
shops.dropna(inplace=True) shops['shop_name'] = shops['shop_name'].str.lower() shops['shop_name'] = shops['shop_name'].str.replace(r'[^\w\d\s]', ' ') shops['shop_type'] = 'regular' shops.loc[shops['shop_name'].str.contains(r'tc'), 'shop_type'] = 'tc' shops.loc[shops['shop_name'].str.contains(r'mall|center|mega'), 'shop_type'] = 'mall' shops.loc[shops['shop_id'].isin([9,20]), 'shop_type'] = 'special' shops.loc[shops['shop_id'].isin([12,55]), 'shop_type'] = 'online' shops['shop_city'] = shops['shop_name'].str.split().str[0] shops.loc[shops['shop_id'].isin([12,55]), 'shop_city'] = 'online' shops.shop_city = le.fit_transform(shops.shop_city.values) shops.shop_type = le.fit_transform(shops.shop_type.values) shops.head()<merge>
epochs = 20 model_ft = models.resnet18(pretrained=False) model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs, 10) model_ft = model_ft.to(device) criterion = nn.CrossEntropyLoss() optimizer_ft = optim.Adam(model_ft.parameters() , lr=0.001) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=5, gamma=0.1) model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, dataloaders, dataset_sizes, num_epochs=epochs )
Digit Recognizer
14,238,959
df = pd.merge(df, shops.drop(columns='shop_name'), on='shop_id', how='left') df.head()<feature_engineering>
test_pred = torch.LongTensor() with torch.no_grad() : for images in test_loader: images = torch.autograd.Variable(images[0]) if torch.cuda.is_available() : images = images.to(device) outputs = model_ft(images) predicted = outputs.cpu().data.max(1, keepdim=True)[1] test_pred = torch.cat(( test_pred, predicted), dim=0 )
Digit Recognizer
14,238,959
df['first_sale_day'] = df.groupby('item_id')['first_sale_day'].transform('max' ).astype('int16') df.loc[df['first_sale_day']==0, 'first_sale_day'] = 1035 df['prev_days_on_sale'] = [max(idx)for idx in zip(df['first_day_of_month']-df['first_sale_day'],[0]*len(df)) ] del df['first_day_of_month']<drop_column>
out_df = pd.DataFrame(np.c_[np.arange(1, len(test_set)+1)[:,None], test_pred.numpy() ], columns=['ImageId', 'Label']) out_df.to_csv('submission.csv', index=False )
Digit Recognizer
13,971,433
del sales, categories, shops, shops_cats, temp, temp2, test, dupes, item_map df.head()<feature_engineering>
train_path = "/kaggle/input/digit-recognizer/train.csv" test_path = "/kaggle/input/digit-recognizer/test.csv" sample_submission_path = "/kaggle/input/digit-recognizer/sample_submission.csv"
Digit Recognizer
13,971,433
df['item_cnt_unclipped'] = df['item_cnt'] df['item_cnt'] = df['item_cnt'].clip(0, 20 )<data_type_conversions>
train = pd.read_csv(train_path) test = pd.read_csv(test_path) sample_submission = pd.read_csv(sample_submission_path )
Digit Recognizer
13,971,433
def downcast(df): float_cols = [c for c in df if df[c].dtype in ["float64"]] int_cols = [c for c in df if df[c].dtype in ['int64']] df[float_cols] = df[float_cols].astype('float32') df[int_cols] = df[int_cols].astype('int16') return df df = downcast(df )<data_type_conversions>
import torch from torchvision.transforms import Normalize from torch.utils.data import Dataset, DataLoader, random_split from tqdm.notebook import tqdm
Digit Recognizer
13,971,433
df['item_age'] =(df['date_block_num'] - df.groupby('item_id')['date_block_num'].transform('min')).astype('int8') df['item_name_first4_age'] =(df['date_block_num'] - df.groupby('item_name_first4')['date_block_num'].transform('min')).astype('int8') df['item_name_first6_age'] =(df['date_block_num'] - df.groupby('item_name_first6')['date_block_num'].transform('min')).astype('int8') df['item_name_first11_age'] =(df['date_block_num'] - df.groupby('item_name_first11')['date_block_num'].transform('min')).astype('int8') df['category_age'] =(df['date_block_num'] - df.groupby('category_id')['date_block_num'].transform('min')).astype('int8') df['group_age'] =(df['date_block_num'] - df.groupby('group_id')['date_block_num'].transform('min')).astype('int8') df['shop_age'] =(df['date_block_num'] - df.groupby('shop_id')['date_block_num'].transform('min')).astype('int8' )<merge>
class LoadImagesLabels(Dataset): def __init__(self, data_frame, stats = None, kind="train"): self.stats = stats self.kind = kind self.imgs, self.labels = self.get_data(data_frame) def get_data(self, data_frame): imgs = np.zeros(shape=(data_frame.shape[0], 1, 28, 28), dtype=np.float32) labels = None if self.kind != "test": labels = np.zeros(shape=(data_frame.shape[0]), dtype=np.int64) for idx, row in tqdm(data_frame.iterrows() , total=data_frame.shape[0], desc="Extracting images and labels"): list_row = row.tolist() if self.kind != "test": labels[idx] = int(list_row[0]) imgs[idx, :] = np.asarray(list_row[1:], dtype=np.float32 ).reshape(1, 28, 28) else: imgs[idx, :] = np.asarray(list_row, dtype=np.float32 ).reshape(1, 28, 28) if self.stats is None: self.stats = {"mean":(imgs.reshape(-1, 1, 28**2)/255.0 ).mean(2 ).sum(0)/imgs.shape[0], "std":(imgs.reshape(-1, 1, 28**2)/255.0 ).std(2 ).sum(0)/imgs.shape[0]} self.create_stats_dataset() else: self.normalize = Normalize(mean=self.stats["mean"], std=self.stats["std"]) return imgs, labels def create_stats_dataset(self): self.normalize = Normalize(mean=self.stats["mean"], std=self.stats["std"]) self.unormalize = Normalize(mean=-self.stats["mean"]/self.stats["std"], std=1/self.stats["std"]) def __len__(self): return len(self.imgs) def __getitem__(self, idx): if self.labels is None: return self.normalize(torch.from_numpy(self.imgs[idx]/255.0)) else: return self.normalize(torch.from_numpy(self.imgs[idx]/255.0)) , torch.tensor(self.labels[idx], dtype=torch.int64 )
Digit Recognizer
13,971,433
temp = df.query('item_cnt > 0' ).groupby(['item_id','shop_id'] ).agg({'date_block_num':'min'} ).reset_index() temp.columns = ['item_id', 'shop_id', 'item_shop_first_sale'] df = pd.merge(df, temp, on=['item_id','shop_id'], how='left') df['item_shop_first_sale'] = df['item_shop_first_sale'].fillna(50) df['item_age_if_shop_sale'] =(df['date_block_num'] > df['item_shop_first_sale'])* df['item_age'] df['item_age_without_shop_sale'] =(df['date_block_num'] <= df['item_shop_first_sale'])* df['item_age'] del df['item_shop_first_sale']<merge>
dataset = LoadImagesLabels(train) train_dataset, val_dataset = random_split(dataset, [int(len(dataset)*0.7), len(dataset)-int(len(dataset)*0.7)]) test_dataset = LoadImagesLabels(test, dataset.stats, "test" )
Digit Recognizer
13,971,433
def agg_cnt_col(df, merging_cols, new_col,aggregation): temp = df.groupby(merging_cols ).agg(aggregation ).reset_index() temp.columns = merging_cols + [new_col] df = pd.merge(df, temp, on=merging_cols, how='left') return df df = agg_cnt_col(df, ['date_block_num','item_id'],'item_cnt_all_shops',{'item_cnt':'mean'}) df = agg_cnt_col(df, ['date_block_num','category_id','shop_id'],'item_cnt_all_shops_median',{'item_cnt':'median'}) df = agg_cnt_col(df, ['date_block_num','category_id','shop_id'],'category_cnt',{'item_cnt':'mean'}) df = agg_cnt_col(df, ['date_block_num','category_id','shop_id'],'category_cnt_median',{'item_cnt':'median'}) df = agg_cnt_col(df, ['date_block_num','category_id'],'category_cnt_all_shops',{'item_cnt':'mean'}) df = agg_cnt_col(df, ['date_block_num','category_id'],'category_cnt_all_shops_median',{'item_cnt':'median'}) df = agg_cnt_col(df, ['date_block_num','group_id','shop_id'],'group_cnt',{'item_cnt':'mean'}) df = agg_cnt_col(df, ['date_block_num','group_id'],'group_cnt_all_shops',{'item_cnt':'mean'}) df = agg_cnt_col(df, ['date_block_num','shop_id'],'shop_cnt',{'item_cnt':'mean'}) df = agg_cnt_col(df, ['date_block_num','shop_city'],'city_cnt',{'item_cnt':'mean'} )<merge>
import matplotlib.pyplot as plt import random
Digit Recognizer
13,971,433
def new_item_sales(df, merging_cols, new_col): temp =( df .query('item_age==0') .groupby(merging_cols)['item_cnt'] .mean() .reset_index() .rename(columns={'item_cnt': new_col}) ) df = pd.merge(df, temp, on=merging_cols, how='left') return df df = new_item_sales(df, ['date_block_num','category_id','shop_id'], 'new_items_in_cat') df = new_item_sales(df, ['date_block_num','category_id'], 'new_items_in_cat_all_shops' )<merge>
import multiprocessing train_dataloader = DataLoader(train_dataset, num_workers=multiprocessing.cpu_count() , shuffle=True, batch_size=2**6) val_dataloader = DataLoader(val_dataset, num_workers=multiprocessing.cpu_count() , shuffle=False, batch_size=8 )
Digit Recognizer
13,971,433
def agg_price_col(df, merging_cols, new_col): temp = df.groupby(merging_cols ).agg({'revenue':'sum','item_cnt_unclipped':'sum'} ).reset_index() temp[new_col] = temp['revenue']/temp['item_cnt_unclipped'] temp = temp[merging_cols + [new_col]] df = pd.merge(df, temp, on=merging_cols, how='left') return df df = agg_price_col(df,['date_block_num','item_id'],'item_price') df = agg_price_col(df,['date_block_num','category_id'],'category_price') df = agg_price_col(df,['date_block_num'],'block_price' )<categorify>
import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import pytorch_lightning as pl from torchvision import models import pytorch_lightning.callbacks as pl_callbacks
Digit Recognizer
13,971,433
df = downcast(df )<merge>
class ClassificationModel(pl.LightningModule): def __init__(self, learning_rate, weights=None): super(ClassificationModel, self ).__init__() self.learning_rate = learning_rate self.loss = nn.CrossEntropyLoss(weight=weights) pretrained_resnet = models.resnet18(pretrained=True) self.encoder = nn.Sequential( nn.Upsample(size=(224, 224), mode='bilinear', align_corners=True), nn.Conv2d(in_channels=1, out_channels=3, kernel_size=1), nn.BatchNorm2d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False), pretrained_resnet ) self.decoder = nn.Sequential( nn.ReLU(inplace=True), nn.Linear(in_features=1000, out_features=10) ) def configure_optimizers(self): return optim.Adam(self.parameters() , lr=(self.learning_rate)) def forward(self, x): x = self.encoder(x) x = self.decoder(x) return x def training_step(self, batch, batch_idx): x, y = batch y_pred = self(x) loss = self.loss(y_pred, y) self.log("train_loss", loss, on_epoch=True, prog_bar=True, logger=False) return loss def validation_step(self, batch, batch_idx): x, y = batch y_pred = self(x) loss = self.loss(y_pred, y) self.log("val_loss", loss, on_epoch=True, prog_bar=True, logger=False )
Digit Recognizer
13,971,433
def lag_feature(df, lag, col, merge_cols): temp = df[merge_cols + [col]] temp = temp.groupby(merge_cols ).agg({f'{col}':'first'} ).reset_index() temp.columns = merge_cols + [f'{col}_lag{lag}'] temp['date_block_num'] += lag df = pd.merge(df, temp, on=merge_cols, how='left') df[f'{col}_lag{lag}'] = df[f'{col}_lag{lag}'].fillna(0 ).astype('float32') return df<define_variables>
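The `trainer` used in the next cell is never constructed in the dumped cells. A minimal sketch under assumed settings (the learning rate, GPU count, epoch budget, and early-stopping monitor are guesses; `pl_callbacks` comes from the import cell above):

# Hypothetical reconstruction of the missing Trainer/model setup.
model = ClassificationModel(learning_rate=1e-3)  # assumed learning rate
trainer = pl.Trainer(
    gpus=1,        # assumes a single-GPU Kaggle kernel
    max_epochs=20, # assumed epoch budget
    callbacks=[pl_callbacks.EarlyStopping(monitor="val_loss")],
)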
trainer.fit(model, train_dataloader, val_dataloader )
Digit Recognizer
13,971,433
lag12_cols = { 'item_cnt':['date_block_num', 'shop_id', 'item_id'], 'item_cnt_all_shops':['date_block_num', 'item_id'], 'category_cnt':['date_block_num', 'shop_id', 'category_id'], 'category_cnt_all_shops':['date_block_num', 'category_id'], 'group_cnt':['date_block_num', 'shop_id', 'group_id'], 'group_cnt_all_shops':['date_block_num', 'group_id'], 'shop_cnt':['date_block_num', 'shop_id'], 'city_cnt':['date_block_num', 'shop_city'], 'new_items_in_cat':['date_block_num', 'shop_id', 'category_id'], 'new_items_in_cat_all_shops':['date_block_num', 'category_id'] } for col,merge_cols in lag12_cols.items() : df[f'{col}_lag1to12'] = 0 for i in range(1,13): df = lag_feature(df, i, col, merge_cols) df[f'{col}_lag1to12'] += df[f'{col}_lag{i}'] if i > 2: del df[f'{col}_lag{i}'] if col == 'item_cnt': del df[f'{col}_lag1'] del df[f'{col}_lag2'] else: del df[col]<drop_column>
train_accuracy = pl.metrics.Accuracy(compute_on_step=False) valid_accuracy = pl.metrics.Accuracy(compute_on_step=False) train_predictions, train_real = [], [] valid_predictions, valid_real = [], [] model.eval() model.cuda() for x, y in tqdm(train_dataloader): with torch.no_grad() : y_pred = model(x.cuda()) train_accuracy(y_pred.cpu() , y) train_predictions +=(list(y_pred.argmax(dim=1 ).cpu())) train_real += list(y) for x, y in tqdm(val_dataloader): with torch.no_grad() : y_pred = model(x.cuda()) valid_accuracy(y_pred.cpu() , y) valid_predictions +=(list(y_pred.argmax(dim=1 ).cpu())) valid_real += list(y) total_train_accuracy = train_accuracy.compute() total_valid_accuracy = valid_accuracy.compute()
Digit Recognizer
13,971,433
lag2_cols = { 'item_cnt_unclipped':['date_block_num', 'shop_id', 'item_id'], 'item_cnt_all_shops_median':['date_block_num', 'item_id'], 'category_cnt_median':['date_block_num', 'shop_id', 'category_id'], 'category_cnt_all_shops_median':['date_block_num', 'category_id'] } for col, merge_cols in lag2_cols.items() : df = lag_feature(df, 1, col, merge_cols) df = lag_feature(df, 2, col, merge_cols) if col!='item_cnt_unclipped': del df[col]<feature_engineering>
print("training accuracy", round(total_train_accuracy.item() * 100, 3), "%") print("validation accuracy", round(total_valid_accuracy.item() * 100, 3), "%" )
Digit Recognizer
13,971,433
df['item_cnt_diff'] = df['item_cnt_unclipped_lag1']/df['item_cnt_lag1to12'] df['item_cnt_all_shops_diff'] = df['item_cnt_all_shops_lag1']/df['item_cnt_all_shops_lag1to12'] df['category_cnt_diff'] = df['category_cnt_lag1']/df['category_cnt_lag1to12'] df['category_cnt_all_shops_diff'] = df['category_cnt_all_shops_lag1']/df['category_cnt_all_shops_lag1to12']<drop_column>
valid_matrix = confusion_matrix(valid_real, valid_predictions, labels=range(10)) train_matrix = confusion_matrix(train_real, train_predictions, labels=range(10))
Digit Recognizer
13,971,433
df = lag_feature(df, 1, 'category_price',['date_block_num', 'category_id']) df = lag_feature(df, 1, 'block_price',['date_block_num']) del df['category_price'], df['block_price']<feature_engineering>
fail_images = [] real_labels, fail_labels = [], [] for x, y in tqdm(val_dataloader): with torch.no_grad() : y_pred = model(x.cuda()) y_pred = y_pred.argmax(dim=1 ).cpu() mask =(y_pred == y ).numpy() idxs = np.where(mask == False)[0] if idxs.shape[0] > 0: for idx in idxs: fail_images.append(x[idx]) real_labels.append(y[idx].item()) fail_labels.append(y_pred[idx].item()) fail_images = dataset.unormalize(torch.stack(fail_images, dim=0))
Digit Recognizer
13,971,433
df.loc[(df['item_age']>0)&(df['item_cnt_lag1to12'].isna()), 'item_cnt_lag1to12'] = 0 df.loc[(df['category_age']>0)&(df['category_cnt_lag1to12'].isna()), 'category_cnt_lag1to12'] = 0 df.loc[(df['group_age']>0)&(df['group_cnt_lag1to12'].isna()), 'group_cnt_lag1to12'] = 0<feature_engineering>
model.to_torchscript("/kaggle/working/mnist_digit.torch.pt" )
Digit Recognizer
13,971,433
df['item_cnt_lag1to12'] /= [min(idx)for idx in zip(df['item_age'],df['shop_age'],[12]*len(df)) ] df['item_cnt_all_shops_lag1to12'] /= [min(idx)for idx in zip(df['item_age'],[12]*len(df)) ] df['category_cnt_lag1to12'] /= [min(idx)for idx in zip(df['category_age'],df['shop_age'],[12]*len(df)) ] df['category_cnt_all_shops_lag1to12'] /= [min(idx)for idx in zip(df['category_age'],[12]*len(df)) ] df['group_cnt_lag1to12'] /= [min(idx)for idx in zip(df['group_age'],df['shop_age'],[12]*len(df)) ] df['group_cnt_all_shops_lag1to12'] /= [min(idx)for idx in zip(df['group_age'],[12]*len(df)) ] df['city_cnt_lag1to12'] /= [min(idx)for idx in zip(df['date_block_num'],[12]*len(df)) ] df['shop_cnt_lag1to12'] /= [min(idx)for idx in zip(df['shop_age'],[12]*len(df)) ] df['new_items_in_cat_lag1to12'] /= [min(idx)for idx in zip(df['category_age'],df['shop_age'],[12]*len(df)) ] df['new_items_in_cat_all_shops_lag1to12'] /= [min(idx)for idx in zip(df['category_age'],[12]*len(df)) ]<categorify>
submission = sample_submission.copy()
Digit Recognizer
13,971,433
df = downcast(df )<merge>
model.eval() predicted_labels = [] for idx, img in tqdm(enumerate(test_dataset), total=len(test_dataset)) : with torch.no_grad() : y_predict = model(img.unsqueeze(0 ).cuda()) predicted_labels.append(y_predict.argmax(dim=1 ).cpu().item() )
Digit Recognizer
13,971,433
def past_information(df, merging_cols, new_col, aggregation): temp = [] for i in range(1,35): block = df.query(f'date_block_num < {i}' ).groupby(merging_cols ).agg(aggregation ).reset_index() block.columns = merging_cols + [new_col] block['date_block_num'] = i block = block[block[new_col]>0] temp.append(block) temp = pd.concat(temp) df = pd.merge(df, temp, on=['date_block_num']+merging_cols, how='left') return df df = past_information(df, ['item_id'],'last_item_price',{'item_price':'last'}) df = past_information(df, ['shop_id','item_id'],'item_cnt_sum_alltime',{'item_cnt':'sum'}) df = past_information(df, ['item_id'],'item_cnt_sum_alltime_allshops',{'item_cnt':'sum'}) del df['revenue'], df['item_cnt_unclipped'], df['item_price']<feature_engineering>
submission["Label"] = predicted_labels submission.to_csv('/kaggle/working/submission.csv',index=False )
Digit Recognizer
13,971,433
<data_type_conversions><EOS>
json.dump(stats, open('/kaggle/working/stats.json', 'w'))
Digit Recognizer
11,420,133
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
%load_ext autoreload %autoreload 2 np.random.seed(1) w.filterwarnings('ignore' )
Digit Recognizer
11,420,133
gc.collect() df = downcast(df )<groupby>
train_file = pd.read_csv(os.path.join(main_path, "train.csv")) test_file = pd.read_csv(os.path.join(main_path, "test.csv"))
Digit Recognizer
11,420,133
def matching_name_cat_age(df,n,all_shops): temp_cols = [f'same_name{n}catage_cnt','date_block_num', f'item_name_first{n}','item_age','category_id'] if all_shops: temp_cols[0] += '_all_shops' else: temp_cols += ['shop_id'] temp = [] for i in range(1,35): block =( df .query(f'date_block_num < {i}') .groupby(temp_cols[2:]) .agg({'item_cnt':'mean'}) .reset_index() .rename(columns={'item_cnt':temp_cols[0]}) ) block = block[block[temp_cols[0]]>0] block['date_block_num'] = i temp.append(block) temp = pd.concat(temp) df = pd.merge(df, temp, on=temp_cols[1:], how='left') return df for n in [4,6,11]: for all_shops in [True,False]: df = matching_name_cat_age(df,n,all_shops )<data_type_conversions>
print("Training file : ") train_file.head(3 ).iloc[:,:17]
Digit Recognizer
11,420,133
df = downcast(df) int8_cols = [ 'item_cnt','month','group_id','shop_type', 'shop_city','shop_id','date_block_num','category_id', 'item_age', ] int16_cols = [ 'item_id','item_name_first4', 'item_name_first6','item_name_first11' ] for col in int8_cols: df[col] = df[col].astype('int8') for col in int16_cols: df[col] = df[col].astype('int16' )<merge>
print("Testing file : ") test_file.head(3 ).iloc[:,:17]
Digit Recognizer
11,420,133
def nearby_item_data(df,col): if col in ['item_cnt_unclipped_lag1','item_cnt_lag1to12']: cols = ['date_block_num', 'shop_id', 'item_id'] temp = df[cols + [col]] else: cols = ['date_block_num', 'item_id'] temp = df.groupby(cols ).agg({col:'first'} ).reset_index() [cols + [col]] temp.columns = cols + [f'below_{col}'] temp['item_id'] += 1 df = pd.merge(df, temp, on=cols, how='left') temp.columns = cols + [f'above_{col}'] temp['item_id'] -= 2 df = pd.merge(df, temp, on=cols, how='left') return df item_cols = ['item_cnt_unclipped_lag1','item_cnt_lag1to12', 'item_cnt_all_shops_lag1','item_cnt_all_shops_lag1to12'] for col in item_cols: df = nearby_item_data(df,col) del temp<feature_engineering>
train_file_norm = train_file.iloc[:, 1:] / 255.0 test_file_norm = test_file / 255.0
Digit Recognizer
11,420,133
results = Counter() items['item_name'].str.split().apply(results.update) words = [] cnts = [] for key, value in results.items() : words.append(key) cnts.append(value) counts = pd.DataFrame({'word':words,'count':cnts}) common_words = counts.query('count>200' ).word.to_list() for word in common_words: items[f'{word}_in_name'] = items['item_name'].str.contains(word ).astype('int8') drop_cols = [ 'item_id','category_id','item_name','item_name_first4', 'item_name_first6','item_name_first11', 'category_name','group_name','group_id' ] items = items.drop(columns=drop_cols )<merge>
num_examples_train = train_file.shape[0] num_examples_test = test_file.shape[0] n_h = 32 n_w = 32 n_c = 3
Digit Recognizer
11,420,133
df = df.join(items, on='item_id' )<categorify>
Train_input_images = np.zeros(( num_examples_train, n_h, n_w, n_c)) Test_input_images = np.zeros(( num_examples_test, n_h, n_w, n_c))
Digit Recognizer
11,420,133
def binary_encode(df, letters, cols): encoder = ce.BinaryEncoder(cols=[f'item_name_first{letters}'], return_df=True) temp = encoder.fit_transform(df[f'item_name_first{letters}']) df = pd.concat([df,temp], axis=1) del df[f'item_name_first{letters}_0'] name_cols = [f'item_name_first{letters}_{x}' for x in range(1,cols)] df[name_cols] = df[name_cols].astype('int8') return df df = binary_encode(df, 11, 15) del df['item_name_first4'], df['item_name_first6']<categorify>
for example in range(num_examples_train): Train_input_images[example,:28,:28,0] = train_file.iloc[example, 1:].values.reshape(28,28) Train_input_images[example,:28,:28,1] = train_file.iloc[example, 1:].values.reshape(28,28) Train_input_images[example,:28,:28,2] = train_file.iloc[example, 1:].values.reshape(28,28) for example in range(num_examples_test): Test_input_images[example,:28,:28,0] = test_file.iloc[example, :].values.reshape(28,28) Test_input_images[example,:28,:28,1] = test_file.iloc[example, :].values.reshape(28,28) Test_input_images[example,:28,:28,2] = test_file.iloc[example, :].values.reshape(28,28 )
Digit Recognizer
11,420,133
df.to_pickle('df_complete.pkl' )<set_options>
for example in range(num_examples_train): Train_input_images[example] = cv2.resize(Train_input_images[example],(n_h, n_w)) for example in range(num_examples_test): Test_input_images[example] = cv2.resize(Test_input_images[example],(n_h, n_w))
Digit Recognizer
11,420,133
%reset -f<set_options>
Train_labels = np.array(train_file.iloc[:, 0] )
Digit Recognizer
11,420,133
pd.set_option('display.max_rows', 160) pd.set_option('display.max_columns', 160) pd.set_option('display.max_colwidth', 30) warnings.filterwarnings("ignore" )<prepare_x_and_y>
train_datagen = ImageDataGenerator( rotation_range=27, width_shift_range=0.3, height_shift_range=0.2, shear_range=0.3, zoom_range=0.2, horizontal_flip=False) validation_datagen = ImageDataGenerator()
Digit Recognizer
11,420,133
df = pd.read_pickle('../input/files-top-scoring-notebook-output-exploration/df_complete.pkl') X_train = df[~df.date_block_num.isin([0,1,33,34])] y_train = X_train['item_cnt'] del X_train['item_cnt'] X_val = df[df['date_block_num']==33] y_val = X_val['item_cnt'] del X_val['item_cnt'] X_test = df[df['date_block_num']==34].drop(columns='item_cnt') X_test = X_test.reset_index() del X_test['index'] del df<train_model>
pretrained_model = keras.applications.resnet50.ResNet50(input_shape=(n_h, n_w, n_c), include_top=False, weights='imagenet') model = keras.Sequential([ pretrained_model, keras.layers.Flatten() , keras.layers.Dense(units=60, activation='relu'), keras.layers.Dense(units=10, activation='softmax') ] )
Digit Recognizer
11,420,133
def build_lgb_model(params, X_train, X_val, y_train, y_val, cat_features): lgb_train = lgb.Dataset(X_train, y_train) lgb_val = lgb.Dataset(X_val, y_val) model = lgb.train(params=params, train_set=lgb_train, valid_sets=(lgb_train, lgb_val), verbose_eval=50, categorical_feature=cat_features) return model<train_model>
Optimizer = 'RMSprop' model.compile(optimizer=Optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
11,420,133
params = { 'objective': 'rmse', 'metric': 'rmse', 'num_leaves': 1023, 'min_data_in_leaf':10, 'feature_fraction':0.7, 'learning_rate': 0.01, 'num_rounds': 1000, 'early_stopping_rounds': 30, 'seed': 1 } cat_features = ['category_id','month','shop_id','shop_city'] lgb_model = build_lgb_model(params, X_train, X_val, y_train, y_val, cat_features) lgb_model.save_model('initial_lgb_model.txt' )<save_to_csv>
train_images, dev_images, train_labels, dev_labels = train_test_split(Train_input_images, Train_labels, test_size=0.1, train_size=0.9, shuffle=True, random_state=44) test_images = Test_input_images
Digit Recognizer
11,420,133
submission = pd.read_csv('../input/competitive-data-science-predict-future-sales/sample_submission.csv') submission['item_cnt_month'] = lgb_model.predict(X_test ).clip(0,20) submission[['ID', 'item_cnt_month']].to_csv('initial_lgb_submission.csv', index=False )<load_from_csv>
class myCallback(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('accuracy')> 0.999999): print("Stop training!") self.model.stop_training = True
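The `callbacks` object passed to `fit_generator` two cells below is never created in the dump; presumably the class above was simply instantiated, as in this assumed reconstruction:

# Assumed instantiation; fit_generator later wraps it as callbacks=[callbacks].
callbacks = myCallback()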
Digit Recognizer
11,420,133
categories = pd.read_csv('../input/predict-future-sales-eng-translation/categories.csv') categories['group_name'] = categories['category_name'].str.extract(r'(^[\w\s]*)') categories['group_name'] = categories['group_name'].str.strip() items = pd.read_csv('../input/predict-future-sales-eng-translation/items.csv') items['item_name'] = items['item_name'].str.lower() for i in [r'[^\w\d\s]', r'\bthe\b', r'\bin\b', r'\bfor\b', r'\bof\b', r'\bd\b', r'\bis\b', r'\bon\b']: items['item_name'] = items['item_name'].str.replace(i, ' ') items['item_name'] = items['item_name'].str.replace(' ', '') items = items.join(categories.set_index('category_id'), on='category_id') shops = pd.read_csv('../input/predict-future-sales-eng-translation/shops.csv') shops['shop_name'] = shops['shop_name'].str.lower() shops['shop_name'] = shops['shop_name'].str.replace(r'[^\w\d\s]', ' ') shops['shop_city'] = shops['shop_name'].str.split().str[0] shops.loc[shops['shop_id'].isin([12,55]), 'shop_city'] = 'online'<predict_on_test>
EPOCHS = 5 batch_size = 212 history = model.fit_generator(train_datagen.flow(train_images,train_labels, batch_size=batch_size), steps_per_epoch=train_images.shape[0] / batch_size, epochs=EPOCHS, validation_data=validation_datagen.flow(dev_images,dev_labels, batch_size=batch_size), validation_steps=dev_images.shape[0] / batch_size, callbacks=[callbacks] )
Digit Recognizer
11,420,133
X_train['lgb_pred'] = lgb_model.predict(X_train ).clip(0,20) X_train['target'] = y_train X_train['sq_err'] =(X_train['lgb_pred']-X_train['target'])**2 X_val['lgb_pred'] = lgb_model.predict(X_val ).clip(0,20) X_val['target'] = y_val X_val['sq_err'] =(X_val['lgb_pred']-X_val['target'])**2 X_test['lgb_pred'] = lgb_model.predict(X_test ).clip(0,20 )<feature_engineering>
submission = pd.read_csv('../input/digit-recognizer-submission/submission.csv') submission.to_csv('digit_submission.csv', index=False )
Digit Recognizer
11,368,221
data = X_train.groupby('date_block_num' ).agg({'lgb_pred':'mean','target':'mean','sq_err':'mean'} ).reset_index() data['new_item_rmse'] = np.sqrt(X_train.query('item_age<=1' ).groupby('date_block_num' ).agg({'sq_err':'mean'} ).sq_err) data['old_item_rmse'] = np.sqrt(X_train.query('item_age>1' ).groupby('date_block_num' ).agg({'sq_err':'mean'} ).sq_err) data = data.append([ {'date_block_num':33, 'target':X_val.target.mean() , 'lgb_pred':X_val.lgb_pred.mean() , 'sq_err':np.sqrt(X_val.sq_err.mean()), 'old_item_rmse':np.sqrt(X_val.query('item_age>1' ).sq_err.mean()), 'new_item_rmse':np.sqrt(X_val.query('item_age<=1' ).sq_err.mean())}, {'date_block_num':34, 'target':0, 'lgb_pred':X_test.lgb_pred.mean() , 'sq_err':0, 'old_item_rmse':0, 'new_item_rmse':0} ], ignore_index=True ) data['date'] = [x[:7] for x in pd.date_range(start='2013-03',end='2015-09',freq='MS' ).astype('str')]+['Validation','Test']<merge>
def set_seed(seed=0): np.random.seed(seed) random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False os.environ['PYTHONHASHSEED'] = str(seed) set_seed(0 )
Digit Recognizer
11,368,221
df = pd.read_pickle('../input/files-top-scoring-notebook-output-exploration/df_complete.pkl') ( df [df['category_id'].isin(X_test.category_id.unique())] .query('item_cnt>0') .groupby('category_id') .agg({ 'category_age':'max', 'shop_id':['nunique','unique'], 'item_cnt':'sum' }) .join(categories['category_name']) .join( X_test .groupby('category_id') .agg({'item_id':'nunique'}) .rename(columns={'item_id':'test_set_items'}) ) .sort_values(( 'shop_id', 'nunique')) .head(20) )<merge>
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device )
Digit Recognizer
11,368,221
( X_val [X_val['category_id'].isin(X_test.category_id.unique())] .groupby('shop_id') .agg({ 'sq_err':'mean', 'target':'mean', 'lgb_pred':'mean' }) .join( X_test .rename(columns={'lgb_pred':'test_pred'}) .groupby('shop_id') .agg({'test_pred':'mean'}) ) .join(shops['shop_name']) .sort_values('sq_err', ascending=False) .head(20) )<merge>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') sample_sub = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv' )
Digit Recognizer
11,368,221
( X_val [X_val['category_id'].isin(X_test.category_id.unique())] .groupby('category_id') .agg({ 'sq_err':['sum','mean'], 'target':'mean', 'lgb_pred':['sum','mean'], 'item_id':'nunique' }) .join( X_test .rename(columns={'lgb_pred':'test_pred','item_id':'test_items'}) .groupby('category_id') .agg({ 'test_pred':['sum','mean'], 'test_items':'nunique' }), on='category_id' ) .join(categories) .sort_values(( 'sq_err', 'mean'), ascending=False) .head(20) )<merge>
X = train.drop(columns='label' ).values.reshape(-1, 28, 28, 1) y = train.label.values train_X, valid_X, train_y, valid_y = train_test_split(X, y, test_size=0.2 )
Digit Recognizer
11,368,221
CATEGORY = 20 ( items[ items['item_id'].isin(X_val.item_id.unique())& items['item_id'].isin(X_test.item_id.unique()) ] [['category_id','category_name','item_id','item_name']] .join( X_test .rename(columns={'lgb_pred':'test_pred'}) .groupby('item_id') .agg({'test_pred':'mean'}), on='item_id' ) .join( X_val .groupby('item_id') .agg({ 'lgb_pred':'mean', 'target':'mean', 'sq_err':'mean', 'same_name4catage_cnt_all_shops':'first', 'new_items_in_cat_all_shops_lag1to12':'first', 'item_cnt_all_shops_lag1':'first', 'category_cnt_all_shops_lag1':'first', 'item_cnt_sum_alltime_allshops':'first', 'prev_days_on_sale':'first' }) .rename(columns={ 'lgb_pred':'val_pred', 'target':'val_target', }), on='item_id' ) .query(f'category_id=={CATEGORY}') .sort_values('sq_err',ascending=False) .rename(columns={ 'same_name4catage_cnt_all_shops':'name4mean', 'new_items_in_cat_all_shops_lag1to12':'new_in_cat_mean', 'item_cnt_all_shops_lag1':'item_cnt_lag1', 'category_cnt_all_shops_lag1':'cat_cnt_lag1', 'category_id':'cat', 'item_cnt_sum_alltime_allshops':'item_cnt_alltime' }) .head(20) )<merge>
class MNISTDataset(Dataset): def __init__(self, X, y=None, is_test=False, transforms=None): self.X = X self.y = y self.is_test = is_test self.transforms = transforms def __len__(self): return len(self.X) def __getitem__(self, index): image = self.X[index] if self.transforms: image = self.transforms(image=image)['image'] if self.is_test: return image else: return image, self.y[index]
Digit Recognizer
11,368,221
CATEGORY = 20 ( items[items['item_id'].isin(X_test.item_id.unique())] [['category_id','category_name','item_id','item_name']] .join( X_test .groupby('item_id') .agg({ 'lgb_pred':'mean', 'same_name4catage_cnt_all_shops':'first', 'new_items_in_cat_all_shops_lag1to12':'first', 'item_cnt_all_shops_lag1':'first', 'category_cnt_all_shops_lag1':'first', 'item_cnt_sum_alltime_allshops':'first', 'prev_days_on_sale':'first' }) .rename(columns={'lgb_pred':'test_pred'}), on='item_id' ) .query(f'category_id=={CATEGORY}') .sort_values('test_pred',ascending=False) .rename(columns={ 'same_name4catage_cnt_all_shops':'name4mean', 'new_items_in_cat_all_shops_lag1to12':'new_in_cat_mean', 'item_cnt_all_shops_lag1':'item_cnt_lag1', 'category_cnt_all_shops_lag1':'cat_cnt_lag1', 'item_category_id':'cat', 'item_cnt_sum_alltime_allshops':'item_cnt_alltime' }) .head(20) )<save_to_csv>
train_transforms = Compose([ToFloat(max_value=255), ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=10, border_mode=cv2.BORDER_CONSTANT, value=0, p=1), ToTensorV2() ]) valid_transforms = Compose([ToFloat(max_value=255), ToTensorV2() ] )
Digit Recognizer
11,368,221
submission_1 = pd.read_csv('../input/blend-boosting-best-score-on-predict-future-sales/submission_blend_1.csv') submission_2 = pd.read_csv('initial_lgb_submission.csv') submission_1['item_cnt_month'] = np.median(np.concatenate([np.expand_dims(submission_1['item_cnt_month'], axis = 1), np.expand_dims(submission_2['item_cnt_month'], axis = 1)], axis = 1), axis = 1) submission_1[['ID', 'item_cnt_month']].to_csv('final_sub.csv', index=False )<install_modules>
example_transforms = Compose([ToFloat(max_value=255), ShiftScaleRotate(shift_limit=0.3, scale_limit=0.3, rotate_limit=30, p=1), ToTensorV2() ] )
Digit Recognizer
11,368,221
!pip install -q "../input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl"<import_modules>
class CNN(nn.Module): def __init__(self): super(CNN, self ).__init__() self.dropout = nn.Dropout2d(0.5) self.bn1_1 = nn.BatchNorm2d(48) self.bn1_2 = nn.BatchNorm2d(48) self.bn2 = nn.BatchNorm1d(256) self.conv1 = nn.Conv2d(in_channels=1, out_channels=48, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(in_channels=48, out_channels=48, kernel_size=3, stride=2, padding=1) self.fc1 = nn.Linear(in_features=48 * 14 * 14, out_features=256) self.fc2 = nn.Linear(in_features=256, out_features=10) def forward(self, x): x = F.relu(self.conv1(x)) x = self.bn1_1(x) x = F.relu(self.conv2(x)) x = self.bn1_2(x) x = self.dropout(x) x = torch.flatten(x, 1) x = F.relu(self.fc1(x)) x = self.bn2(x) x = self.dropout(x) x = self.fc2(x) return x
Digit Recognizer
11,368,221
import pandas as pd import numpy as np import os import tqdm import cv2 import matplotlib.pyplot as plt import seaborn as sns import gc import base64 from pycocotools import _mask as coco_mask import typing as t import zlib from torch.utils.data import Dataset, DataLoader import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 import torch import torch.nn as nn from collections import OrderedDict import ttach as tta<load_from_csv>
class EarlyStopping: def __init__(self, mode, path, patience=3, delta=0): if mode not in {'min', 'max'}: raise ValueError("Argument mode must be one of 'min' or 'max'.") if patience <= 0: raise ValueError("Argument patience must be a postive integer.") if delta < 0: raise ValueError("Argument delta must not be a negative number.") self.mode = mode self.patience = patience self.delta = delta self.path = path self.best_score = np.inf if mode == 'min' else -np.inf self.counter = 0 def _is_improvement(self, val_score): if self.mode == 'max' and val_score > self.best_score + self.delta: return True elif self.mode == 'min' and val_score < self.best_score - self.delta: return True return False def __call__(self, val_score, model): if self._is_improvement(val_score): self.best_score = val_score self.counter = 0 torch.save(model.state_dict() , self.path) print('Val loss improved.Saved model.') return False else: self.counter += 1 print(f'Early stopping counter: {self.counter}/{self.patience}') if self.counter >= self.patience: print(f'Stopped early.Best val loss: {self.best_score:.4f}') return True
Digit Recognizer
11,368,221
df = pd.read_csv('../input/hpa-single-cell-image-classification/sample_submission.csv' )<define_variables>
def train_one_epoch(model, train_loader, optimizer, device, criterion): model.train() running_loss_train = 0 for inputs, labels in train_loader: inputs, labels = inputs.to(device), labels.to(device) optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss_train += loss.item() train_loss = running_loss_train / len(train_loader.dataset) return train_loss
Digit Recognizer
11,368,221
ROOT = '../input/hpa-single-cell-image-classification/' train_or_test = 'test'<prepare_x_and_y>
def validate(model, valid_loader, device, criterion): model.eval() correct = 0 running_loss_val = 0 with torch.no_grad() : for inputs, labels in valid_loader: inputs, labels = inputs.to(device), labels.to(device) outputs = model(inputs) loss = criterion(outputs, labels) pred = outputs.argmax(dim=1) correct += pred.eq(labels ).sum().item() running_loss_val += loss.item() val_acc = correct / len(valid_loader.dataset) val_loss = running_loss_val / len(valid_loader.dataset) return val_acc, val_loss
Digit Recognizer
11,368,221
def get_cropped_cell(img, msk): bmask = msk.astype(int)[...,None] masked_img = img * bmask true_points = np.argwhere(bmask) top_left = true_points.min(axis=0) bottom_right = true_points.max(axis=0) cropped_arr = masked_img[top_left[0]:bottom_right[0]+1,top_left[1]:bottom_right[1]+1] return cropped_arr<compute_test_metric>
def fit(model, train_loader, valid_loader, learning_rate, num_epochs): criterion = nn.CrossEntropyLoss(reduction='sum') optimizer = optim.Adam(model.parameters() , lr=learning_rate) es = EarlyStopping(mode='min', path='model.pth', patience=5) model = model.to(device) scheduler = ExponentialLR(optimizer, gamma=0.9) for epoch in range(1, num_epochs + 1): train_loss = train_one_epoch(model, train_loader, optimizer, device, criterion) val_acc, val_loss = validate(model, valid_loader, device, criterion) scheduler.step() print(f'Epoch {epoch:2}/{num_epochs}', f'train loss: {train_loss:.4f}', f'val loss: {val_loss:.4f}', f'val acc: {val_acc:.2%}', sep=' | ' ) if es(val_loss, model): break
Digit Recognizer
11,368,221
def get_stats(cropped_cell): x =(cropped_cell/255.0 ).reshape(-1,3 ).mean(0) x2 =(( cropped_cell/255.0)**2 ).reshape(-1,3 ).mean(0) return x, x2<data_type_conversions>
TRAIN_BATCH_SIZE = 64 VALID_BATCH_SIZE = 512 NUM_EPOCHS = 50 LEARNING_RATE = 1e-3 NUM_WORKERS = 0 mnist_train = MNISTDataset(train_X, train_y, is_test=False, transforms=train_transforms) mnist_valid = MNISTDataset(valid_X, valid_y, is_test=False, transforms=valid_transforms) train_loader = DataLoader(mnist_train, batch_size=TRAIN_BATCH_SIZE, shuffle=True, drop_last=True) valid_loader = DataLoader(mnist_valid, batch_size=VALID_BATCH_SIZE, shuffle=False) model = CNN() start = time.time() fit(model, train_loader, valid_loader, learning_rate=LEARNING_RATE, num_epochs=NUM_EPOCHS) print(f'Total training time: {time.time() - start}') model.load_state_dict(torch.load('model.pth'))
Digit Recognizer
11,368,221
def read_img(image_id, color, train_or_test='test', image_size=None): filename = f'{ROOT}/{train_or_test}/{image_id}_{color}.png' assert os.path.exists(filename), f'not found {filename}' img = cv2.imread(filename, cv2.IMREAD_UNCHANGED) if image_size is not None: img = cv2.resize(img,(image_size, image_size)) if img.max() > 255: img_max = img.max() img =(img/255 ).astype('uint8') return img<categorify>
TEST_BATCH_SIZE = 512 test_transforms = Compose([ToFloat(max_value=255), ToTensorV2() ]) test_X = test.values.reshape(-1, 28, 28, 1) mnist_test = MNISTDataset(test_X, is_test=True, transforms=test_transforms) test_loader = DataLoader(mnist_test, batch_size=TEST_BATCH_SIZE, shuffle=False )
Digit Recognizer
11,368,221
def encode_binary_mask(mask: np.ndarray) -> t.Text:
    if mask.dtype != np.bool_:  # np.bool_ replaces the removed np.bool alias
        raise ValueError(
            "encode_binary_mask expects a binary mask, received dtype == %s" % mask.dtype)
    mask = np.squeeze(mask)
    if len(mask.shape) != 2:
        raise ValueError(
            "encode_binary_mask expects a 2d mask, received shape == %s" % mask.shape)
    mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1)
    mask_to_encode = mask_to_encode.astype(np.uint8)
    mask_to_encode = np.asfortranarray(mask_to_encode)
    encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]
    binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
    base64_str = base64.b64encode(binary_str)
    return base64_str.decode('ascii')<choose_model_class>
def predict(model, test_loader, device):
    model.eval()
    predictions = sample_sub['Label'].values
    with torch.no_grad():
        for i, inputs in enumerate(test_loader):
            inputs = inputs.to(device)
            outputs = model(inputs)
            pred = outputs.argmax(dim=1).to('cpu').numpy()
            predictions[i * TEST_BATCH_SIZE:i * TEST_BATCH_SIZE + len(inputs)] = pred
    output = sample_sub.copy()
    output['Label'] = predictions
    output.to_csv('submission.csv', index=False)
    return output

output = predict(model, test_loader, device)
output
Digit Recognizer
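For debugging it helps to invert encode_binary_mask. The decoder below is not part of the original code; it assumes the coco_mask alias above refers to pycocotools.mask and simply walks the pipeline backwards (base64 -> zlib -> COCO RLE -> boolean mask):

import base64
import zlib
import numpy as np
from pycocotools import mask as coco_mask  # assumed to match the alias above

def decode_binary_mask(base64_str: str, height: int, width: int) -> np.ndarray:
    encoded_mask = zlib.decompress(base64.b64decode(base64_str))
    rle = {"size": [height, width], "counts": encoded_mask}
    return coco_mask.decode([rle])[:, :, 0].astype(bool)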
11,601,712
warnings.filterwarnings("ignore", category=SourceChangeWarning)

NUC_MODEL = "../input/hpacellsegmentatormodelweights/dpn_unet_nuclei_v1.pth"
CELL_MODEL = "../input/hpacellsegmentatormodelweights/dpn_unet_cell_3ch_v1.pth"
segmentator = cellsegmentator.CellSegmentator(
    NUC_MODEL,
    CELL_MODEL,
    scale_factor=0.25,
    device="cuda",
    padding=True,
    multi_channel_model=True,
)<load_from_csv>
from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
Digit Recognizer
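The excerpt never shows the segmentator being called. Based on the HPA-Cell-Segmentation package installed earlier in this notebook, a typical invocation (a sketch with hypothetical file names, not code from this kernel) looks like:

from hpacellseg.utils import label_cell  # ships with the installed package

red, yellow, blue = ['x_red.png'], ['x_yellow.png'], ['x_blue.png']  # hypothetical paths
nuc_segmentations = segmentator.pred_nuclei(blue)
cell_segmentations = segmentator.pred_cells([red, yellow, blue])
nuclei_mask, cell_mask = label_cell(nuc_segmentations[0], cell_segmentations[0])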
11,601,712
df = pd.read_csv('./cell_df.csv')<create_dataframe>
np.random.seed(42)
Digit Recognizer
11,601,712
valid_transforms = A.Compose([
    A.Resize(width=224, height=224),
    A.Normalize(),
    ToTensorV2(),
])

class CellDataset(Dataset):
    def __init__(self, data_dir, csv_file, transform=None):
        super().__init__()
        self.data_dir = data_dir
        self.df = csv_file
        self.transforms = transform
        self.img_ids = self.df['image_id'].values
        self.cell_ids = self.df['cell_id'].values

    def __len__(self):
        return len(self.img_ids)

    def get_image(self, index):
        image_id = self.img_ids[index]
        cell_id = self.cell_ids[index]
        img_path = os.path.join(self.data_dir, 'cells', image_id + '_' + str(cell_id) + '.jpg')
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = self.transforms(image=img)
        img = img['image']
        return img

    def __getitem__(self, index):
        x = self.get_image(index)
        return x<create_dataframe>
def doSubmission(y_pred):
    # np.int64 replaces the removed np.int alias
    test_Id = np.arange(1, y_pred.size + 1, dtype=np.int64)
    pred_dict = {"ImageId": test_Id, "Label": y_pred}
    df = pd.DataFrame(pred_dict)
    df.to_csv("sample_submission.csv", index=False, index_label=False)
Digit Recognizer
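A quick sanity check on CellDataset — a usage sketch, assuming df from the earlier pd.read_csv('./cell_df.csv') cell: after Resize, Normalize and ToTensorV2, each item should come back as a 3x224x224 float tensor.

ds = CellDataset(data_dir='', csv_file=df, transform=valid_transforms)
sample = ds[0]
print(type(sample), sample.shape)  # expected: <class 'torch.Tensor'> torch.Size([3, 224, 224])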
11,601,712
test_dataset = CellDataset(data_dir='', csv_file=df, transform=valid_transforms)<load_pretrained>
df_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
Digit Recognizer
11,601,712
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False, num_workers=4, drop_last=False)<choose_model_class>
y = df_train.label.to_numpy()
X = df_train.drop(columns=["label"]).to_numpy(np.float64)
X /= 255.0
Digit Recognizer
11,601,712
class Net(nn.Module):
    def __init__(self, name='efficientnet_b0', num_classes=19):
        super(Net, self).__init__()
        self.model = timm.create_model(name, pretrained=False, num_classes=num_classes)

    def forward(self, x):
        out = self.model(x)
        return out<feature_engineering>
X_totrain = X.reshape(X.shape[0], 28, 28, 1)
Digit Recognizer
11,601,712
def update_state_dict(state_dict):
    new_state_dict = OrderedDict()
    for key in state_dict.keys():
        new_state_dict['.'.join(key.split('.')[1:])] = state_dict[key]
    return new_state_dict<load_pretrained>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y)
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
Digit Recognizer
11,601,712
model_b1_f0 = Net(name='efficientnet_b1')
model_b1_f0.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b1-224-fold-0/epoch1-valid_loss_epoch0.118.pth')))
model_b1_f0.cuda()
model_b1_f0.eval()

model_b1_f1 = Net(name='efficientnet_b1')
model_b1_f1.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b1-224-fold-1/epoch1-valid_loss_epoch0.117.pth')))
model_b1_f1.cuda()
model_b1_f1.eval()

model_b1_f2 = Net(name='efficientnet_b1')
model_b1_f2.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b1-224-fold-4/epoch1-valid_loss_epoch0.119.pth')))
model_b1_f2.cuda()
model_b1_f2.eval()

model_b1_f3 = Net(name='efficientnet_b1')
model_b1_f3.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b1-224-fold-3/epoch1-valid_loss_epoch0.116.pth')))
model_b1_f3.cuda()
model_b1_f3.eval()

model_b1_f4 = Net(name='efficientnet_b1')
model_b1_f4.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b1-224-fold-44/epoch1-valid_loss_epoch0.117.pth')))
model_b1_f4.cuda()
model_b1_f4.eval()

model_b0_f0 = Net(name='efficientnet_b0')
model_b0_f0.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b0-224-fold-0/epoch1-valid_loss_epoch0.119.pth')))
model_b0_f0.cuda()
model_b0_f0.eval()

model_b0_f1 = Net(name='efficientnet_b0')
model_b0_f1.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b0-224-fold-1/epoch1-valid_loss_epoch0.118.pth')))
model_b0_f1.cuda()
model_b0_f1.eval()

model_b0_f2 = Net(name='efficientnet_b0')
model_b0_f2.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b0-224-fold-2/epoch1-valid_loss_epoch0.120.pth')))
model_b0_f2.cuda()
model_b0_f2.eval()

model_b0_f3 = Net(name='efficientnet_b0')
model_b0_f3.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b0-224-fold-3/epoch1-valid_loss_epoch0.117.pth')))
model_b0_f3.cuda()
model_b0_f3.eval()

model_b0_f4 = Net(name='efficientnet_b0')
model_b0_f4.load_state_dict(update_state_dict(torch.load('../input/efficientnet-b0-224-fold-4/epoch8-valid_loss_epoch0.117.pth')))
model_b0_f4.cuda()
model_b0_f4.eval()<prepare_output>
y_cat = to_categorical(y, 10)
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
Digit Recognizer
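The ten nearly identical load blocks above can be collapsed into a loop. A sketch using only names already defined in this section (CKPTS is a hypothetical container; fill in the remaining checkpoint paths exactly as listed above):

import torch

CKPTS = {
    'efficientnet_b1': [
        '../input/efficientnet-b1-224-fold-0/epoch1-valid_loss_epoch0.118.pth',
        '../input/efficientnet-b1-224-fold-1/epoch1-valid_loss_epoch0.117.pth',
        # ...remaining b1 fold checkpoints from the cell above
    ],
    'efficientnet_b0': [
        '../input/efficientnet-b0-224-fold-0/epoch1-valid_loss_epoch0.119.pth',
        # ...remaining b0 fold checkpoints from the cell above
    ],
}

models = []
for name, paths in CKPTS.items():
    for path in paths:
        m = Net(name=name)
        m.load_state_dict(update_state_dict(torch.load(path)))
        m.cuda()
        m.eval()
        models.append(m)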
11,601,712
pred = torch.FloatTensor()
pred = pred.cuda()<categorify>
test = df_test.to_numpy(np.float64)
test = test.reshape(test.shape[0], 28, 28, 1)
test /= 255.0
Digit Recognizer
11,601,712
tta1 = tta.ClassificationTTAWrapper(model_b1_f0, tta.aliases.flip_transform())
tta2 = tta.ClassificationTTAWrapper(model_b1_f1, tta.aliases.flip_transform())
tta3 = tta.ClassificationTTAWrapper(model_b1_f2, tta.aliases.flip_transform())
tta4 = tta.ClassificationTTAWrapper(model_b1_f3, tta.aliases.flip_transform())
tta5 = tta.ClassificationTTAWrapper(model_b1_f4, tta.aliases.flip_transform())
tta6 = tta.ClassificationTTAWrapper(model_b0_f0, tta.aliases.flip_transform())
tta7 = tta.ClassificationTTAWrapper(model_b0_f1, tta.aliases.flip_transform())
tta8 = tta.ClassificationTTAWrapper(model_b0_f2, tta.aliases.flip_transform())
tta9 = tta.ClassificationTTAWrapper(model_b0_f3, tta.aliases.flip_transform())
tta10 = tta.ClassificationTTAWrapper(model_b0_f4, tta.aliases.flip_transform())<normalization>
def convNeuralNetwork(filters=256, kernel_size=(3, 3), pool_size=(2, 2), units=128, dropout=0.2):
    cnn = Sequential()
    cnn.add(Conv2D(filters=filters, kernel_size=kernel_size, strides=(1, 1),
                   input_shape=(28, 28, 1), activation="relu", padding="same"))
    cnn.add(MaxPool2D(pool_size=pool_size, padding="same"))
    cnn.add(Conv2D(filters=filters, kernel_size=kernel_size, strides=(1, 1),
                   activation="relu", padding="same"))
    cnn.add(MaxPool2D(pool_size=pool_size, padding="same"))
    cnn.add(Flatten())
    cnn.add(Dense(units=units, activation="relu"))
    cnn.add(Dropout(dropout))
    cnn.add(Dense(units=units, activation="relu"))
    cnn.add(Dropout(dropout))
    cnn.add(Dense(units=10, activation="softmax"))
    cnn.compile(optimizer="adamax", loss="categorical_crossentropy", metrics=["accuracy"])
    return cnn
Digit Recognizer
11,601,712
with torch.no_grad():
    for inp in tqdm.tqdm(test_loader):
        bs, c, h, w = inp.size()
        input_var = inp.view(-1, c, h, w).cuda()
        output = (tta1(input_var) + tta2(input_var) +
                  tta3(input_var) + tta4(input_var) +
                  tta5(input_var) + tta6(input_var) +
                  tta7(input_var) + tta8(input_var) +
                  tta9(input_var) + tta10(input_var)) / 10
        output_mean = output.view(bs, -1)
        pred = torch.cat((pred, output_mean.data), 0)<count_missing_values>
early_stopping = EarlyStopping(monitor="val_loss", patience=10, verbose=1, restore_best_weights=True) cnn = convNeuralNetwork(filters=2048, units=1024) cnn_hist = cnn.fit(X_train, y_train_cat, validation_data=(X_test, y_test_cat), epochs=50, batch_size=256, callbacks=[early_stopping] )
Digit Recognizer
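With the models held in a list (see the loop sketch above), the ten-wrapper average in the inference cell shrinks to a stack-and-mean. A hedged sketch, assuming the tta alias is the ttach library:

import torch
import ttach as tta  # assumed import alias for the wrappers above

tta_models = [tta.ClassificationTTAWrapper(m, tta.aliases.flip_transform())
              for m in models]

with torch.no_grad():
    for inp in test_loader:
        bs, c, h, w = inp.size()
        x = inp.view(-1, c, h, w).cuda()
        output = torch.stack([m(x) for m in tta_models]).mean(0)  # ensemble mean
        pred = torch.cat((pred, output.view(bs, -1).data), 0)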
11,601,712
def isNaN(num):
    return num != num<load_from_csv>
early_stopping = EarlyStopping(monitor="loss", patience=10, restore_best_weights=True) model_checkpoint = ModelCheckpoint(filepath="./", monitor="loss", verbose=1, save_best_only=True, save_weights_only=True) model = convNeuralNetwork(filters=2048, units=1024) model_hist = model.fit(X_totrain, y_cat, batch_size=256, epochs=50, callbacks=[early_stopping, model_checkpoint] )
Digit Recognizer
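One caveat in the cell above: ModelCheckpoint is given filepath="./", a directory rather than a file, which with save_weights_only=True leaves the checkpoint location ambiguous. A safer variant (best_weights.h5 is a hypothetical filename; the other arguments are unchanged):

from tensorflow.keras.callbacks import ModelCheckpoint

model_checkpoint = ModelCheckpoint(filepath="./best_weights.h5",  # hypothetical name
                                   monitor="loss", verbose=1,
                                   save_best_only=True, save_weights_only=True)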
11,601,712
cell_df = pd.read_csv('./cell_df.csv')
cell_df['cls'] = ''<feature_engineering>
y_pred = model.predict(test).argmax(1)
doSubmission(y_pred)
Digit Recognizer
11,684,407
threshold = 0.0
for i in range(pred_torch.shape[0]):
    p = torch.nonzero(pred_torch[i] > threshold).squeeze().numpy().tolist()
    if type(p) != list:
        p = [p]
    if len(p) == 0:
        cls = [(pred_torch[i].argmax().item(), pred_torch[i].max().item())]
    else:
        cls = [(x, pred_torch[i][x].item()) for x in p]
    cell_df['cls'].loc[i] = cls<categorify>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from sklearn.model_selection import train_test_split
Digit Recognizer
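The thresholding loop above writes each row through chained indexing (cell_df['cls'].loc[i] = ...). An equivalent sketch that collects the per-cell class lists first and assigns the column once, sidestepping pandas' chained-assignment pitfalls (same rule: argmax fallback when no score clears the threshold):

import torch

cls_lists = []
for i in range(pred_torch.shape[0]):  # pred_torch and threshold as in the cell above
    hits = torch.nonzero(pred_torch[i] > threshold).squeeze(1)
    if len(hits) == 0:
        cls_lists.append([(pred_torch[i].argmax().item(), pred_torch[i].max().item())])
    else:
        cls_lists.append([(j.item(), pred_torch[i][j].item()) for j in hits])
cell_df['cls'] = cls_lists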
11,684,407
def combine(r):
    cls = r[0]
    enc = r[1]
    classes = [str(c[0]) + ' ' + str(c[1]) + ' ' + enc for c in cls]
    return ' '.join(classes)

combine(cell_df[['cls', 'enc']].loc[24])<feature_engineering>
training = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
testing = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
Digit Recognizer
11,684,407
cell_df['pred'] = cell_df[['cls', 'enc']].apply(combine, axis=1)
cell_df.head()<groupby>
X = training.drop(columns='label').values
y = training['label'].values
Digit Recognizer
11,684,407
subm = cell_df.groupby(['image_id'])['pred'].apply(lambda x: ' '.join(x)).reset_index()
subm.head()<load_from_csv>
X = X / 255
X = X.reshape(X.shape[0], *(28, 28, 1))
print(X.shape)
Digit Recognizer
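A toy illustration of the groupby-join above, with made-up IDs: the per-cell prediction strings are concatenated into one space-separated string per image.

import pandas as pd

toy = pd.DataFrame({'image_id': ['a', 'a', 'b'],
                    'pred': ['0 0.9 enc1', '3 0.7 enc2', '16 0.8 enc3']})
print(toy.groupby(['image_id'])['pred'].apply(lambda x: ' '.join(x)).reset_index())
#   image_id                   pred
# 0        a  0 0.9 enc1 3 0.7 enc2
# 1        b             16 0.8 enc3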
11,684,407
sample_submission = pd.read_csv('../input/hpa-single-cell-image-classification/sample_submission.csv')
sample_submission.head()<merge>
testing = testing.values
testing = testing / 255
testing = testing.reshape(testing.shape[0], *(28, 28, 1))
Digit Recognizer
11,684,407
sub = pd.merge(
    sample_submission,
    subm,
    how="left",
    left_on='ID',
    right_on='image_id',
)
sub.head()<feature_engineering>
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
Digit Recognizer
11,684,407
def isNaN(num):
    return num != num

for i, row in sub.iterrows():
    if isNaN(row['pred']):
        continue
    # direct .loc write avoids pandas chained-assignment pitfalls
    sub.loc[i, 'PredictionString'] = row['pred']<save_to_csv>
cnn = tf.keras.models.Sequential()
Digit Recognizer
11,684,407
sub.to_csv('submission.csv', index=False)<save_to_csv>
cnn.add(layer=tf.keras.layers.Conv2D(input_shape=(28, 28, 1), filters=64, kernel_size=(3, 3), activation='relu'))
Digit Recognizer
11,684,407
sub.to_csv('submission.csv', index=False)<load_from_csv>
cnn.add(layer=tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2))
Digit Recognizer
11,684,407
!rm cell_df.csv<install_modules>
cnn.add(layer=tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
Digit Recognizer
11,684,407
!pip install ".. /input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl" !pip install ".. /input/hpapytorchzoozip/pytorch_zoo-master" !pip install ".. /input/hpacellsegmentatorraman/HPA-Cell-Segmentation/" !pip install ".. /input/efficientnet-keras-source-code/" --no-deps !pip install ".. /input/kerasapplications" <install_modules>
cnn.add(layer=tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2))
Digit Recognizer
11,684,407
!pip install ".. /input/efficientnet-pytorch-07/efficientnet_pytorch-0.7.0" <set_options>
cnn.add(layer=tf.keras.layers.Flatten())
Digit Recognizer
11,684,407
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_virtual_device_configuration(
                gpu,
                [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=5600)])
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        print(e)<load_from_csv>
cnn.add(layer=tf.keras.layers.Dropout(0.5))
Digit Recognizer
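The excerpt ends with this Sequential model still under construction: the classifier head and compile step are not shown. A plausible completion — an assumption mirroring the convNeuralNetwork definition earlier in this section, not the kernel's actual tail — would be:

cnn.add(layer=tf.keras.layers.Dense(units=128, activation='relu'))
cnn.add(layer=tf.keras.layers.Dense(units=10, activation='softmax'))
cnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])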