Dataset schema (column, dtype, value or string-length range):

kernel_id    int64    24.2k – 23.3M
prompt       string   lengths 8 – 1.85M
completion   string   lengths 1 – 182k
comp_name    string   lengths 5 – 57
4,361,589
!pip install --upgrade xgboost
import xgboost as xgb
xgb.__version__<init_hyperparams>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
4,361,589
xgb_params = { "objective": "reg:squarederror", "max_depth": 6, "learning_rate": 0.01, "colsample_bytree": 0.4, "subsample": 0.6, "reg_alpha": 6, "min_child_weight": 100, "n_jobs": 2, "seed": 2001, "tree_method": "gpu_hist", "gpu_id": 0 }<define_variables>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
4,361,589
train_oof = np.zeros((300000,)) test_preds = 0 train_oof.shape<prepare_x_and_y>
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
4,361,589
Test = xgb.DMatrix(test[columns])<train_model>
random_seed = 2
Digit Recognizer
4,361,589
NUM_FOLDS = 10 kf = KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0) for f, (train_ind, val_ind) in tqdm(enumerate(kf.split(train, target))): train_df, val_df = train.iloc[train_ind][columns], train.iloc[val_ind][columns] train_target, val_target = target[train_ind], target[val_ind] train_df = xgb.DMatrix(train_df, label=train_target) val_df = xgb.DMatrix(val_df, label=val_target) model = xgb.train(xgb_params, train_df, 3600) temp_oof = model.predict(val_df) temp_test = model.predict(Test) train_oof[val_ind] = temp_oof test_preds += temp_test/NUM_FOLDS print(mean_squared_error(temp_oof, val_target, squared=False))<compute_test_metric>
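The fold loop above is the standard out-of-fold (OOF) pattern: every training row is predicted exactly once by a model that never saw it, while test predictions are averaged over the folds. A minimal runnable sketch of the same idea (the toy data and the stand-in LinearRegression model are assumptions, not the kernel's setup):

import numpy as np
from sklearn.model_selection import KFold
from sklearn.linear_model import LinearRegression  # stand-in model (assumption)

X, y = np.random.rand(100, 3), np.random.rand(100)
X_test = np.random.rand(20, 3)
NUM_FOLDS = 5
train_oof = np.zeros(len(y))        # one held-out prediction per training row
test_preds = np.zeros(len(X_test))  # fold-averaged test predictions
for train_ind, val_ind in KFold(n_splits=NUM_FOLDS, shuffle=True, random_state=0).split(X):
    m = LinearRegression().fit(X[train_ind], y[train_ind])
    train_oof[val_ind] = m.predict(X[val_ind])   # each row predicted exactly once
    test_preds += m.predict(X_test) / NUM_FOLDS  # average over folds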
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed )
Digit Recognizer
4,361,589
mean_squared_error(train_oof, target, squared=False )<save_model>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
4,361,589
np.save('train_oof', train_oof) np.save('test_preds', test_preds )<predict_on_test>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
4,361,589
%%time shap_preds = model.predict(Test, pred_contribs=True)<load_from_csv>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
4,361,589
train = pd.read_csv('../input/tabular-playground-series-feb-2021/train.csv') test = pd.read_csv('../input/tabular-playground-series-feb-2021/test.csv') for feature in cat_features: le = LabelEncoder() le.fit(train[feature]) train[feature] = le.transform(train[feature]) test[feature] = le.transform(test[feature])<save_to_csv>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
4,361,589
sample_sub['target'] = test_preds sample_sub.to_csv('submission.csv', index=False )<install_modules>
epochs = 30 batch_size = 86
Digit Recognizer
4,361,589
!pip install librosa<import_modules>
Digit Recognizer
4,361,589
import numpy as np import pandas as pd import os<load_pretrained>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
4,361,589
audio_data = '/kaggle/input/birdsong-recognition/train_audio/nutwoo/XC462016.mp3' x , sr = librosa.load(audio_data) print(type(x), type(sr)) print(x.shape, sr )<load_pretrained>
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction] )
Digit Recognizer
4,361,589
librosa.load(audio_data, sr=44100 )<normalization>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
4,361,589
<set_options><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_mnist_datagen.csv",index=False )
Digit Recognizer
3,365,490
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
from keras.layers import GlobalAveragePooling2D, Conv2D, BatchNormalization, Dropout INPUT_DIR = '../input' EMB_SIZE = 8 BATCH_SIZE = 1024 N_FOLDS = 2 N_ITER = 50 SEED = 32
Digit Recognizer
3,365,490
sr = 22050 T = 5.0 t = np.linspace(0, T, int(T*sr), endpoint=False) x = 0.5*np.sin(2*np.pi*220*t) ipd.Audio(x, rate=sr) librosa.output.write_wav('tone_220.wav', x, sr )<normalization>
def _all_diffs(a, b): return tf.expand_dims(a, axis=1)- tf.expand_dims(b, axis=0) def _cdist(a, b, metric='euclidean'): with tf.name_scope("_cdist"): diffs = _all_diffs(a, b) if metric == 'sqeuclidean': return tf.reduce_sum(tf.square(diffs), axis=-1) elif metric == 'euclidean': return tf.sqrt(tf.reduce_sum(tf.square(diffs), axis=-1)+ 1e-12) elif metric == 'cityblock': return tf.reduce_sum(tf.abs(diffs), axis=-1) else: raise NotImplementedError( 'The following metric is not implemented by `_cdist` yet: {}'.format(metric)) _cdist.supported_metrics = [ 'euclidean', 'sqeuclidean', 'cityblock', ] def _get_at_indices(tensor, indices): counter = tf.range(tf.shape(indices, out_type=indices.dtype)[0]) return tf.gather_nd(tensor, tf.stack(( counter, indices), -1)) def batch_hard_loss(features, pids, metric='euclidean', margin=0.1): with tf.name_scope("batch_hard_loss"): dists = _cdist(features, features, metric=metric) pids = tf.argmax(pids, axis=1) exp_dims0 = tf.expand_dims(pids, axis=0) exp_dims1 = tf.expand_dims(pids, axis=1) same_identity_mask = tf.equal(exp_dims1, exp_dims0) negative_mask = tf.logical_not(same_identity_mask) positive_mask = tf.logical_xor(same_identity_mask, tf.eye(tf.shape(pids)[0], dtype=tf.bool)) furthest_positive = tf.reduce_max(dists*tf.cast(positive_mask, tf.float32), axis=1) closest_negative = tf.reduce_min(dists + 1e5*tf.cast(same_identity_mask, tf.float32), axis=1) diff = furthest_positive - closest_negative if isinstance(margin, numbers.Real): diff = tf.maximum(diff + margin, 0.0) elif margin == 'soft': diff = tf.nn.softplus(diff) elif margin is None: pass else: raise NotImplementedError('The margin {} is not implemented in batch_hard_loss'.format(margin)) return diff def triplet_loss(labels, features): return tf.reduce_mean(batch_hard_loss(features, labels, margin=0.2))
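A minimal sketch of how the batch-hard triplet loss above plugs into Keras (the embedding network, input shape, and one-hot labels are assumptions; note the cell also relies on import numbers and import tensorflow as tf from earlier cells):

import tensorflow as tf
# Hypothetical embedding network producing 32-d embeddings from flat inputs.
embed_net = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dense(32),
])
# triplet_loss(labels, features): labels are one-hot identity vectors,
# features are the embeddings produced by the network.
embed_net.compile(optimizer='adam', loss=triplet_loss)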
Digit Recognizer
3,365,490
zero_crossings = librosa.zero_crossings(x[n0:n1], pad=False) print(sum(zero_crossings))<import_modules>
def f1(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) recall = true_positives /(possible_positives + K.epsilon()) precision = true_positives /(predicted_positives + K.epsilon()) return 2 *(( precision * recall)/(precision + recall + K.epsilon()))
Digit Recognizer
3,365,490
import cv2 import audioread import logging import os import random import time import warnings import librosa import numpy as np import pandas as pd import soundfile as sf import torch import torch.nn as nn import torch.cuda import torch.nn.functional as F import torch.utils.data as data from contextlib import contextmanager from pathlib import Path from typing import Optional from fastprogress import progress_bar from sklearn.metrics import f1_score from torchvision import models<set_options>
df_train = pd.read_csv(os.path.join(INPUT_DIR, 'train.csv')) df_test = pd.read_csv(os.path.join(INPUT_DIR, 'test.csv')) print(df_train.head() )
Digit Recognizer
3,365,490
def set_seed(seed: int = 42): random.seed(seed) np.random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = True def get_logger(out_file=None): logger = logging.getLogger() formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") logger.handlers = [] logger.setLevel(logging.INFO) handler = logging.StreamHandler() handler.setFormatter(formatter) handler.setLevel(logging.INFO) logger.addHandler(handler) if out_file is not None: fh = logging.FileHandler(out_file) fh.setFormatter(formatter) fh.setLevel(logging.INFO) logger.addHandler(fh) logger.info("logger set up") return logger @contextmanager def timer(name: str, logger: Optional[logging.Logger] = None): t0 = time.time() msg = f"[{name}] start" if logger is None: print(msg) else: logger.info(msg) yield msg = f"[{name}] done in {time.time() - t0:.2f} s" if logger is None: print(msg) else: logger.info(msg )<set_options>
x_train = df_train.iloc[:,1:].values.astype('float32')/ 255. x_test = df_test.values.astype('float32')/ 255. xc_train = np.reshape(x_train,(len(x_train), 28, 28, 1)) xc_test = np.reshape(x_test,(len(x_test), 28, 28, 1)) y_train = df_train.label.values yc_train = to_categorical(y_train) input_size = output_size = x_train.shape[1] input_csize = output_csize = xc_train.shape[1] print(xc_train[:5], xc_test[:5])
Digit Recognizer
3,365,490
logger = get_logger("main.log") set_seed(1213 )<define_variables>
def base_network(model_type='triplet', input_shape=input_csize): if model_type == 'autoencoder': pass elif model_type == 'triplet': model = Sequential([ Conv2D(filters=64, kernel_size=(3, 3), padding='same', input_shape=(input_csize, input_csize, 1,), activation='relu'), Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'), BatchNormalization() , MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), Dropout(0.25), Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'), Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'), BatchNormalization() , MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), Dropout(0.25), Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'), Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'), BatchNormalization() , MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'), Dropout(0.25), Flatten() , Dense(512, activation='relu'), Dense(256, activation='relu'), BatchNormalization() , Dropout(0.25), Dense(10, name='embeddings', activation='softmax'), ]) return model
Digit Recognizer
3,365,490
TARGET_SR = 32000 TEST = Path("../input/birdsong-recognition/test_audio").exists()<load_from_csv>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(xc_train )
Digit Recognizer
3,365,490
if TEST: DATA_DIR = Path("../input/birdsong-recognition/") else: DATA_DIR = Path("../input/birdcall-check/") test = pd.read_csv(DATA_DIR / "test.csv") test_audio = DATA_DIR / "test_audio" test.head()<save_to_csv>
yfull_test = [] skf = StratifiedKFold(n_splits=N_FOLDS, random_state=SEED, shuffle=True) print(len(xc_train), len(y_train)) for i,(train_index, val_index)in enumerate(skf.split(xc_train, y_train)) : triplet_model = base_network() triplet_model.compile(optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0), loss=['categorical_crossentropy'], metrics=[f1]) weights_path = os.path.join('.', f'w{i}.h5') callbacks=[ ReduceLROnPlateau(monitor='val_f1', patience=3, verbose=1, factor=0.5, min_lr=0.00001), ModelCheckpoint(weights_path, monitor='val_f1', mode='max', save_best_only=True, verbose=0) ] xb_train = xc_train[train_index] yb_train = yc_train[train_index] xb_val = xc_train[val_index] yb_val = yc_train[val_index] history = triplet_model.fit_generator( datagen.flow(xb_train, yb_train, batch_size=BATCH_SIZE), validation_data=datagen.flow(xb_val, yb_val, batch_size=BATCH_SIZE), validation_steps=xb_val.shape[0] // BATCH_SIZE, verbose=2, epochs=N_ITER, steps_per_epoch=xb_train.shape[0] // BATCH_SIZE, shuffle=True, callbacks=callbacks) if os.path.isfile(weights_path): triplet_model.load_weights(weights_path) print(fbeta_score(yb_val, np.array(triplet_model.predict(xb_val, batch_size=128, verbose=2)) > 0.2, beta=2, average='samples')) yfull_test.append(triplet_model.predict(xc_test, batch_size=128, verbose=2))
Digit Recognizer
3,365,490
sub = pd.read_csv("../input/birdsong-recognition/sample_submission.csv") sub.to_csv("submission.csv", index=False)<choose_model_class>
pred = np.array(yfull_test) pred = np.argmax(pred, axis=2) values, counts = np.unique(pred, axis=0, return_counts=True) pred = values[np.argmax(counts)] print(pred.shape )
Digit Recognizer
3,365,490
class ResNet(nn.Module): def __init__(self, base_model_name: str, pretrained=False, num_classes=264): super().__init__() base_model = models.__getattribute__(base_model_name )( pretrained=pretrained) layers = list(base_model.children())[:-2] layers.append(nn.AdaptiveMaxPool2d(1)) self.encoder = nn.Sequential(*layers) in_features = base_model.fc.in_features self.classifier = nn.Sequential( nn.Linear(in_features, 1024), nn.ReLU() , nn.Dropout(p=0.2), nn.Linear(1024, 1024), nn.ReLU() , nn.Dropout(p=0.2), nn.Linear(1024, num_classes)) def forward(self, x): batch_size = x.size(0) x = self.encoder(x ).view(batch_size, -1) x = self.classifier(x) multiclass_proba = F.softmax(x, dim=1) multilabel_proba = F.sigmoid(x) return { "logits": x, "multiclass_proba": multiclass_proba, "multilabel_proba": multilabel_proba }<define_variables>
Digit Recognizer
3,365,490
model_config = { "base_model_name": "resnet50", "pretrained": False, "num_classes": 264 } melspectrogram_parameters = { "n_mels": 128, "fmin": 20, "fmax": 16000 } weights_path = "../input/birdcall-resnet50-init-weights/best.pth"<define_variables>
submission = pd.DataFrame({'ImageId': range(1, pred.shape[0]+1), 'Label': pred}) submission.to_csv('submission.csv', index=False )
Digit Recognizer
4,861,518
BIRD_CODE = { 'aldfly': 0, 'ameavo': 1, 'amebit': 2, 'amecro': 3, 'amegfi': 4, 'amekes': 5, 'amepip': 6, 'amered': 7, 'amerob': 8, 'amewig': 9, 'amewoo': 10, 'amtspa': 11, 'annhum': 12, 'astfly': 13, 'baisan': 14, 'baleag': 15, 'balori': 16, 'banswa': 17, 'barswa': 18, 'bawwar': 19, 'belkin1': 20, 'belspa2': 21, 'bewwre': 22, 'bkbcuc': 23, 'bkbmag1': 24, 'bkbwar': 25, 'bkcchi': 26, 'bkchum': 27, 'bkhgro': 28, 'bkpwar': 29, 'bktspa': 30, 'blkpho': 31, 'blugrb1': 32, 'blujay': 33, 'bnhcow': 34, 'boboli': 35, 'bongul': 36, 'brdowl': 37, 'brebla': 38, 'brespa': 39, 'brncre': 40, 'brnthr': 41, 'brthum': 42, 'brwhaw': 43, 'btbwar': 44, 'btnwar': 45, 'btywar': 46, 'buffle': 47, 'buggna': 48, 'buhvir': 49, 'bulori': 50, 'bushti': 51, 'buwtea': 52, 'buwwar': 53, 'cacwre': 54, 'calgul': 55, 'calqua': 56, 'camwar': 57, 'cangoo': 58, 'canwar': 59, 'canwre': 60, 'carwre': 61, 'casfin': 62, 'caster1': 63, 'casvir': 64, 'cedwax': 65, 'chispa': 66, 'chiswi': 67, 'chswar': 68, 'chukar': 69, 'clanut': 70, 'cliswa': 71, 'comgol': 72, 'comgra': 73, 'comloo': 74, 'commer': 75, 'comnig': 76, 'comrav': 77, 'comred': 78, 'comter': 79, 'comyel': 80, 'coohaw': 81, 'coshum': 82, 'cowscj1': 83, 'daejun': 84, 'doccor': 85, 'dowwoo': 86, 'dusfly': 87, 'eargre': 88, 'easblu': 89, 'easkin': 90, 'easmea': 91, 'easpho': 92, 'eastow': 93, 'eawpew': 94, 'eucdov': 95, 'eursta': 96, 'evegro': 97, 'fiespa': 98, 'fiscro': 99, 'foxspa': 100, 'gadwal': 101, 'gcrfin': 102, 'gnttow': 103, 'gnwtea': 104, 'gockin': 105, 'gocspa': 106, 'goleag': 107, 'grbher3': 108, 'grcfly': 109, 'greegr': 110, 'greroa': 111, 'greyel': 112, 'grhowl': 113, 'grnher': 114, 'grtgra': 115, 'grycat': 116, 'gryfly': 117, 'haiwoo': 118, 'hamfly': 119, 'hergul': 120, 'herthr': 121, 'hoomer': 122, 'hoowar': 123, 'horgre': 124, 'horlar': 125, 'houfin': 126, 'houspa': 127, 'houwre': 128, 'indbun': 129, 'juntit1': 130, 'killde': 131, 'labwoo': 132, 'larspa': 133, 'lazbun': 134, 'leabit': 135, 'leafly': 136, 'leasan': 137, 'lecthr': 138, 'lesgol': 139, 'lesnig': 140, 'lesyel': 141, 'lewwoo': 142, 'linspa': 143, 'lobcur': 144, 'lobdow': 145, 'logshr': 146, 'lotduc': 147, 'louwat': 148, 'macwar': 149, 'magwar': 150, 'mallar3': 151, 'marwre': 152, 'merlin': 153, 'moublu': 154, 'mouchi': 155, 'moudov': 156, 'norcar': 157, 'norfli': 158, 'norhar2': 159, 'normoc': 160, 'norpar': 161, 'norpin': 162, 'norsho': 163, 'norwat': 164, 'nrwswa': 165, 'nutwoo': 166, 'olsfly': 167, 'orcwar': 168, 'osprey': 169, 'ovenbi1': 170, 'palwar': 171, 'pasfly': 172, 'pecsan': 173, 'perfal': 174, 'phaino': 175, 'pibgre': 176, 'pilwoo': 177, 'pingro': 178, 'pinjay': 179, 'pinsis': 180, 'pinwar': 181, 'plsvir': 182, 'prawar': 183, 'purfin': 184, 'pygnut': 185, 'rebmer': 186, 'rebnut': 187, 'rebsap': 188, 'rebwoo': 189, 'redcro': 190, 'redhea': 191, 'reevir1': 192, 'renpha': 193, 'reshaw': 194, 'rethaw': 195, 'rewbla': 196, 'ribgul': 197, 'rinduc': 198, 'robgro': 199, 'rocpig': 200, 'rocwre': 201, 'rthhum': 202, 'ruckin': 203, 'rudduc': 204, 'rufgro': 205, 'rufhum': 206, 'rusbla': 207, 'sagspa1': 208, 'sagthr': 209, 'savspa': 210, 'saypho': 211, 'scatan': 212, 'scoori': 213, 'semplo': 214, 'semsan': 215, 'sheowl': 216, 'shshaw': 217, 'snobun': 218, 'snogoo': 219, 'solsan': 220, 'sonspa': 221, 'sora': 222, 'sposan': 223, 'spotow': 224, 'stejay': 225, 'swahaw': 226, 'swaspa': 227, 'swathr': 228, 'treswa': 229, 'truswa': 230, 'tuftit': 231, 'tunswa': 232, 'veery': 233, 'vesspa': 234, 'vigswa': 235, 'warvir': 236, 'wesblu': 237, 'wesgre': 238, 'weskin': 239, 'wesmea': 240, 'wessan': 241, 'westan': 
242, 'wewpew': 243, 'whbnut': 244, 'whcspa': 245, 'whfibi': 246, 'whtspa': 247, 'whtswi': 248, 'wilfly': 249, 'wilsni1': 250, 'wiltur': 251, 'winwre3': 252, 'wlswar': 253, 'wooduc': 254, 'wooscj2': 255, 'woothr': 256, 'y00475': 257, 'yebfly': 258, 'yebsap': 259, 'yehbla': 260, 'yelwar': 261, 'yerwar': 262, 'yetvir': 263 } INV_BIRD_CODE = {v: k for k, v in BIRD_CODE.items() }<choose_model_class>
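A quick sanity check of the forward and inverse mappings built above:

print(BIRD_CODE['nutwoo'])   # 166
print(INV_BIRD_CODE[166])    # 'nutwoo'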
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
4,861,518
def get_model(config: dict, weights_path: str): model = ResNet(**config) checkpoint = torch.load(weights_path) model.load_state_dict(checkpoint["model_state_dict"]) device = torch.device("cuda") model.to(device) model.eval() return model<create_dataframe>
train_y = train["label"] train_x = train.drop("label",axis = 1 )
Digit Recognizer
4,861,518
def prediction_for_clip(test_df: pd.DataFrame, clip: np.ndarray, model: ResNet, mel_params: dict, threshold=0.55): dataset = TestDataset(df=test_df, clip=clip, img_size=224, melspectrogram_parameters=mel_params) loader = data.DataLoader(dataset, batch_size=1, shuffle=False) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.eval() prediction_dict = {} for image, row_id, site in progress_bar(loader): site = site[0] row_id = row_id[0] if site in {"site_1", "site_2"}: image = image.to(device) with torch.no_grad() : prediction = model(image) proba = prediction["multilabel_proba"].detach().cpu().numpy().reshape(-1) events = proba >= threshold labels = np.argwhere(events ).reshape(-1 ).tolist() else: image = image.squeeze(0) batch_size = 16 whole_size = image.size(0) if whole_size % batch_size == 0: n_iter = whole_size // batch_size else: n_iter = whole_size // batch_size + 1 all_events = set() for batch_i in range(n_iter): batch = image[batch_i * batch_size:(batch_i + 1)* batch_size] if batch.ndim == 3: batch = batch.unsqueeze(0) batch = batch.to(device) with torch.no_grad() : prediction = model(batch) proba = prediction["multilabel_proba"].detach().cpu().numpy() events = proba >= threshold for i in range(len(events)) : event = events[i, :] labels = np.argwhere(event ).reshape(-1 ).tolist() for label in labels: all_events.add(label) labels = list(all_events) if len(labels)== 0: prediction_dict[row_id] = "nocall" else: labels_str_list = list(map(lambda x: INV_BIRD_CODE[x], labels)) label_string = " ".join(labels_str_list) prediction_dict[row_id] = label_string return prediction_dict<load_pretrained>
train_y = to_categorical(train_y )
Digit Recognizer
4,861,518
def prediction(test_df: pd.DataFrame, test_audio: Path, model_config: dict, mel_params: dict, weights_path: str, threshold=0.5): model = get_model(model_config, weights_path) unique_audio_id = test_df.audio_id.unique() warnings.filterwarnings("ignore") prediction_dfs = [] for audio_id in unique_audio_id: with timer(f"Loading {audio_id}", logger): clip, _ = librosa.load(test_audio /(audio_id + ".mp3"), sr=TARGET_SR, mono=True, res_type="kaiser_fast") test_df_for_audio_id = test_df.query( f"audio_id == '{audio_id}'" ).reset_index(drop=True) with timer(f"Prediction on {audio_id}", logger): prediction_dict = prediction_for_clip(test_df_for_audio_id, clip=clip, model=model, mel_params=mel_params, threshold=threshold) row_id = list(prediction_dict.keys()) birds = list(prediction_dict.values()) prediction_df = pd.DataFrame({ "row_id": row_id, "birds": birds }) prediction_dfs.append(prediction_df) prediction_df = pd.concat(prediction_dfs, axis=0, sort=False ).reset_index(drop=True) return prediction_df<save_to_csv>
model = Sequential()
Digit Recognizer
4,861,518
submission = prediction(test_df=test, test_audio=test_audio, model_config=model_config, mel_params=melspectrogram_parameters, weights_path=weights_path, threshold=0.85) submission.to_csv("submission.csv", index=False )<install_modules>
model.add(Conv2D(32,(3,3), strides=(1, 1), padding='same', activation="relu",input_shape =(28,28,1),data_format = "channels_last", use_bias = True)) model.add(Conv2D(32,(3,3), strides=(1, 1), padding='same', activation="relu", use_bias = True)) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2), padding='same')) model.add(Dropout(0.2)) model.add(Conv2D(64,(3,3), strides=(1, 1), padding='same', activation="relu", use_bias = True)) model.add(Conv2D(64,(3,3), strides=(1, 1), padding='same', activation="relu", use_bias = True)) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2), padding='same')) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(256,activation = "relu", use_bias = True)) model.add(Dropout(0.5)) model.add(Dense(10,activation = "softmax",use_bias = True))
Digit Recognizer
4,861,518
!pip install mlforecast<import_modules>
optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
4,861,518
from copy import copy from functools import partial from pathlib import Path import lightgbm as lgb import numpy as np import pandas as pd from mlforecast.core import TimeSeries from mlforecast.forecast import Forecast from window_ops.rolling import rolling_mean<load_from_csv>
model.compile(optimizer = optimizer,loss = "categorical_crossentropy",metrics = ['accuracy'] )
Digit Recognizer
4,861,518
input_path = Path('../input/m5-preprocess/processed/') data = pd.read_parquet(input_path/'sales.parquet') data<load_from_csv>
learning_rate_reduction = callbacks.ReduceLROnPlateau(monitor='loss',patience=3, verbose=1,factor=0.2,min_lr=0.00001 )
Digit Recognizer
4,861,518
prices = pd.read_parquet(input_path/'prices.parquet') prices<load_from_csv>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(train_x )
Digit Recognizer
4,861,518
cal = pd.read_parquet(input_path/'calendar.parquet') cal = cal.rename(columns={'date': 'ds'}) cal.head()<choose_model_class>
model.fit_generator(datagen.flow(train_x,train_y,batch_size = 100),epochs = 30,steps_per_epoch=train_x.shape[0] // 100, callbacks=[learning_rate_reduction] )
Digit Recognizer
4,861,518
lgb_params = { 'objective': 'poisson', 'metric': 'rmse', 'force_row_wise': True, 'learning_rate': 0.075, 'bagging_freq': 1, 'bagging_fraction': 0.75, 'lambda_l2': 0.1, 'n_estimators': 1200, 'num_leaves': 128, 'min_data_in_leaf': 100, } model = lgb.LGBMRegressor(**lgb_params) model<define_variables>
y_pred = model.predict(test )
Digit Recognizer
4,861,518
ts = TimeSeries( freq='D', lags=[7, 28], lag_transforms = { 7: [(rolling_mean, 7),(rolling_mean, 28)], 28: [(rolling_mean, 7),(rolling_mean, 28)], }, date_features=['year', 'month', 'day', 'dayofweek', 'quarter', 'week'], ) ts<prepare_output>
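For intuition, a lag_transforms entry like 7: [(rolling_mean, 7)] applies a 7-step rolling mean to the series lagged by 7 days. A rough pandas equivalent (a hedged sketch on toy data, not mlforecast's internal code):

import pandas as pd
s = pd.Series(range(100), dtype='float64')  # one item's sales history (toy data)
lag7 = s.shift(7)                           # the lag-7 series
lag7_rm7 = lag7.rolling(7).mean()           # rolling_mean with window 7 over that lag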
y_pred = np.array(y_pred )
Digit Recognizer
4,861,518
fcst = Forecast(model, ts )<define_variables>
y_pred_final = [] for i in y_pred: y_pred_final.append(np.argmax(i))
Digit Recognizer
4,861,518
<prepare_x_and_y><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("digit_mnist.csv",index=False )
Digit Recognizer
5,146,253
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_model>
warnings.filterwarnings(category=FutureWarning, action="ignore") %matplotlib inline backend.set_image_data_format('channels_last') DATA_PATH = '../input/' SERIES = 'A' VERSION = 1 print('{:s}{:d}'.format(SERIES, VERSION))
Digit Recognizer
5,146,253
%time fcst.model.fit(X_train, y_train, eval_set=[(X_train, y_train),(X_valid, y_valid)], verbose=20 )<predict_on_test>
train_data = pd.read_csv(DATA_PATH+'train.csv') train_data.head()
Digit Recognizer
5,146,253
def my_predict_fn(model, new_x, features_order, alpha): new_x = new_x.reset_index() new_x = new_x.merge(cal) new_x = new_x.merge(prices) new_x = new_x.sort_values('unique_id') new_x = new_x[features_order] predictions = model.predict(new_x) return alpha * predictions<define_variables>
test_data = pd.read_csv(DATA_PATH+'test.csv') test_data.index =([x+1 for x in range(test_data.shape[0])]) print(test_data.shape) test_data.head()
Digit Recognizer
5,146,253
fcst.ts.num_threads<predict_on_test>
def get_model_params(layers) -> str: res = {} for layer in layers: lres = {} config = layer.get_config() for key in ['filters', 'kernel_size', 'activation', 'pool_size', 'padding', 'strides', 'rate', 'units', 'kernel_regularizer', 'batch_input_shape']: if key in config.keys(): lres[key] = config[key] res[config['name']] = lres rep = '' for elem in res: rep += elem + ': ' rep += str(res[elem]) + ' ' return rep
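A hedged one-line usage sketch for get_model_params above, applied to the Sequential model defined in this kernel:

print(get_model_params(model.layers))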
Digit Recognizer
5,146,253
%%time alphas = [1.028, 1.023, 1.018] preds = None for alpha in alphas: alpha_preds = fcst.predict(28, my_predict_fn, alpha=alpha) alpha_preds = alpha_preds.set_index('ds', append=True) if preds is None: preds = 1 / 3 * alpha_preds else: preds += 1 / 3 * alpha_preds preds<rename_columns>
x_train = train_data.iloc[:, 1:].values.reshape( (train_data.shape[0], 28, 28, 1)).astype('float32') x_train = x_train / 255.0 x_test = test_data.values.reshape( (test_data.shape[0], 28, 28, 1)).astype('float32') x_test = x_test / 255.0 lb = LabelBinarizer() y_train_ = lb.fit_transform(train_data.iloc[:, 0])
Digit Recognizer
5,146,253
wide = preds.reset_index().pivot_table(index='unique_id', columns='ds') wide.columns = [f'F{i+1}' for i in range(28)] wide.columns.name = None wide.index.name = 'id' wide<save_to_csv>
train_gen = ImageDataGenerator( rotation_range=9, zoom_range=0.09, width_shift_range=0.09, height_shift_range=0.11, validation_split=0.05 ) train_gen.fit(x_train) train_iterator = train_gen.flow( x=x_train, y=y_train_, batch_size=256, subset='training') val_iterator = train_gen.flow( x=x_train, y=y_train_, batch_size=256, subset='validation') train_x, train_y = train_iterator.next()
Digit Recognizer
5,146,253
sample_sub = pd.read_csv('../input/m5-forecasting-accuracy/sample_submission.csv', index_col='id') sample_sub.update(wide) np.testing.assert_allclose(sample_sub.sum().sum(), preds['y_pred'].sum()) sample_sub.to_csv('submission.csv')<set_options>
model = Sequential([ Conv2D(filters=128, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)) , MaxPooling2D(pool_size=(2, 2)) , Conv2D(filters=256, kernel_size=(3, 3), activation='relu'), MaxPooling2D(pool_size=(2, 2)) , Conv2D(filters=512, kernel_size=(4, 4), activation='relu'), MaxPooling2D(pool_size=(2, 2)) , Flatten() , Dense(units=256, activation='relu'), Dropout(rate=0.35), Dense(units=128, activation='relu'), Dense(units=64, activation='relu'), Dense(10, activation='softmax') ]) print(model.summary()) plot_model(model, show_shapes=True, show_layer_names=True,)
Digit Recognizer
5,146,253
warnings.filterwarnings("ignore") pd.set_option('display.max_columns', 100) pd.set_option('display.max_rows', 100) DATA_PATH = '.. /input/jane-street-market-prediction/' NFOLDS = 5 TRAIN = False CACHE_PATH = '.. /input/mlp012003weights' def save_pickle(dic, save_path): with open(save_path, 'wb')as f: pickle.dump(dic, f) def load_pickle(load_path): with open(load_path, 'rb')as f: message_dict = pickle.load(f) return message_dict feat_cols = [f'feature_{i}' for i in range(130)] target_cols = ['action', 'action_1', 'action_2', 'action_3', 'action_4'] f_mean = np.load(f'{CACHE_PATH}/f_mean_online.npy') all_feat_cols = [col for col in feat_cols] all_feat_cols.extend(['cross_41_42_43', 'cross_1_2']) class Model(nn.Module): def __init__(self): super(Model, self ).__init__() self.batch_norm0 = nn.BatchNorm1d(len(all_feat_cols)) self.dropout0 = nn.Dropout(0.2) dropout_rate = 0.2 hidden_size = 256 self.dense1 = nn.Linear(len(all_feat_cols), hidden_size) self.batch_norm1 = nn.BatchNorm1d(hidden_size) self.dropout1 = nn.Dropout(dropout_rate) self.dense2 = nn.Linear(hidden_size+len(all_feat_cols), hidden_size) self.batch_norm2 = nn.BatchNorm1d(hidden_size) self.dropout2 = nn.Dropout(dropout_rate) self.dense3 = nn.Linear(hidden_size+hidden_size, hidden_size) self.batch_norm3 = nn.BatchNorm1d(hidden_size) self.dropout3 = nn.Dropout(dropout_rate) self.dense4 = nn.Linear(hidden_size+hidden_size, hidden_size) self.batch_norm4 = nn.BatchNorm1d(hidden_size) self.dropout4 = nn.Dropout(dropout_rate) self.dense5 = nn.Linear(hidden_size+hidden_size, len(target_cols)) self.Relu = nn.ReLU(inplace=True) self.PReLU = nn.PReLU() self.LeakyReLU = nn.LeakyReLU(negative_slope=0.01, inplace=True) self.RReLU = nn.RReLU() def forward(self, x): x = self.batch_norm0(x) x = self.dropout0(x) x1 = self.dense1(x) x1 = self.batch_norm1(x1) x1 = self.LeakyReLU(x1) x1 = self.dropout1(x1) x = torch.cat([x, x1], 1) x2 = self.dense2(x) x2 = self.batch_norm2(x2) x2 = self.LeakyReLU(x2) x2 = self.dropout2(x2) x = torch.cat([x1, x2], 1) x3 = self.dense3(x) x3 = self.batch_norm3(x3) x3 = self.LeakyReLU(x3) x3 = self.dropout3(x3) x = torch.cat([x2, x3], 1) x4 = self.dense4(x) x4 = self.batch_norm4(x4) x4 = self.LeakyReLU(x4) x4 = self.dropout4(x4) x = torch.cat([x3, x4], 1) x = self.dense5(x) return x if True: device = torch.device("cpu") model_list = [] tmp = np.zeros(len(feat_cols)) for _fold in range(NFOLDS): torch.cuda.empty_cache() model = Model() model.to(device) model_weights = f"{CACHE_PATH}/online_model{_fold}.pth" model.load_state_dict(torch.load(model_weights, map_location=torch.device('cpu'))) model.eval() model_list.append(model )<choose_model_class>
NEPOCHS = 300 early_stopping_cb = EarlyStopping(monitor='val_acc', min_delta=1e-5, patience=15, restore_best_weights=True) rl_reduce = ReduceLROnPlateau(monitor='val_loss', patience=10,factor=0.25,verbose=1,min_delta=1e-5) opt_rms = RMSprop(learning_rate=1e-3, centered=False) model.compile(loss='categorical_crossentropy', optimizer=opt_rms, metrics=['accuracy']) start = dt.datetime.now() history = model.fit_generator( generator=train_iterator, verbose=1, epochs=NEPOCHS, max_queue_size=10, validation_data=val_iterator, callbacks=[ early_stopping_cb, rl_reduce ] ) print(' Evaluation :') model.evaluate_generator(generator=val_iterator, verbose=1, ) print('Finished in: ', dt.datetime.now() -start)
Digit Recognizer
5,146,253
<choose_model_class><EOS>
test_gen = ImageDataGenerator( rotation_range=9, zoom_range=0.09, width_shift_range=0.09, height_shift_range=0.11, ) test_gen.fit(x_test) test_iterator = test_gen.flow(x=x_test, batch_size=len(x_test), shuffle=False) test_x = test_iterator.next() test_x res = model.predict(test_x) y_pred = pd.DataFrame([test_data.index, [x.argmax() for x in res]] ).T y_pred.columns = ['ImageId', 'Label'] y_pred.to_csv('digit_submission_2.csv', index=False)
Digit Recognizer
4,082,669
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_pretrained>
%matplotlib inline
Digit Recognizer
4,082,669
embNN_model = Emb_NN_Model() try: embNN_model.load_state_dict(torch.load("../input/jane-embnn5-auc-400-400-400/Jane_EmbNN5_auc_400_400_400.pth")) except: embNN_model.load_state_dict(torch.load("../input/jane-embnn5-auc-400-400-400/Jane_EmbNN5_auc_400_400_400.pth", map_location='cpu')) embNN_model = embNN_model.eval()<split>
path = Path('../input/') !ls ../input
Digit Recognizer
4,082,669
env = janestreet.make_env() env_iter = env.iter_test()<concatenate>
class CustomImageItemList(ImageList): def open(self, fn): img = fn.reshape(28, 28) img = np.stack(( img,)*3, axis=-1) return Image(pil2tensor(img, dtype=np.float32)) @classmethod def from_csv_custom(cls, path:PathOrStr, csv_name:str, imgIdx:int=1, header:str='infer', **kwargs)-> 'ItemList': df = pd.read_csv(Path(path)/csv_name, header=header) res = super().from_df(df, path=path, cols=0, **kwargs) res.items = df.iloc[:,imgIdx:].apply(lambda x: x.values / 783.0, axis=1 ).values return res
Digit Recognizer
4,082,669
if True: for(test_df, pred_df)in tqdm(env_iter): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, feat_cols].values if np.isnan(x_tt.sum()): x_tt = np.nan_to_num(x_tt)+ np.isnan(x_tt)* f_mean cross_41_42_43 = x_tt[:, 41] + x_tt[:, 42] + x_tt[:, 43] cross_1_2 = x_tt[:, 1] /(x_tt[:, 2] + 1e-5) feature_inp = np.concatenate(( x_tt, np.array(cross_41_42_43 ).reshape(x_tt.shape[0], 1), np.array(cross_1_2 ).reshape(x_tt.shape[0], 1), ), axis=1) torch_pred = np.zeros(( 1, len(target_cols))) for model in model_list: torch_pred += model(torch.tensor(feature_inp, dtype=torch.float ).to(device)).sigmoid().detach().cpu().numpy() / NFOLDS torch_pred = np.median(torch_pred) tf_pred = np.median(np.mean([model(x_tt, training = False ).numpy() for model in tf_models],axis=0)) x_tt = torch.tensor(x_tt ).float().view(-1, 130) embnn_p = np.median(torch.sigmoid(embNN_model(None, x_tt)).detach().cpu().numpy().reshape(( -1, 5)) , axis=1) pred_pr = torch_pred*0.4 + tf_pred*0.4 + embnn_p*0.2 pred_df.action = np.where(pred_pr >= 0.5, 1, 0 ).astype(int) else: pred_df.action = 0 env.predict(pred_df )<load_pretrained>
test = CustomImageItemList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0) data = (CustomImageItemList.from_csv_custom(path=path, csv_name='train.csv') .random_split_by_pct(.2) .label_from_df(cols='label') .add_test(test, label=0) .databunch(bs=64, num_workers=0) .normalize(imagenet_stats))
Digit Recognizer
4,082,669
pd.set_option('display.max_columns', 100) pd.set_option('display.max_rows', 100) DATA_PATH = '.. /input/jane-street-market-prediction/' NFOLDS = 5 TRAIN = False CACHE_PATH = '.. /input/mlp012003weights' def save_pickle(dic, save_path): with open(save_path, 'wb')as f: pickle.dump(dic, f) def load_pickle(load_path): with open(load_path, 'rb')as f: message_dict = pickle.load(f) return message_dict feat_cols = [f'feature_{i}' for i in range(130)] target_cols = ['action', 'action_1', 'action_2', 'action_3', 'action_4'] f_mean = np.load(f'{CACHE_PATH}/f_mean_online.npy') all_feat_cols = [col for col in feat_cols] all_feat_cols.extend(['cross_41_42_43', 'cross_1_2']) class Model(nn.Module): def __init__(self): super(Model, self ).__init__() self.batch_norm0 = nn.BatchNorm1d(len(all_feat_cols)) self.dropout0 = nn.Dropout(0.2) dropout_rate = 0.2 hidden_size = 256 self.dense1 = nn.Linear(len(all_feat_cols), hidden_size) self.batch_norm1 = nn.BatchNorm1d(hidden_size) self.dropout1 = nn.Dropout(dropout_rate) self.dense2 = nn.Linear(hidden_size+len(all_feat_cols), hidden_size) self.batch_norm2 = nn.BatchNorm1d(hidden_size) self.dropout2 = nn.Dropout(dropout_rate) self.dense3 = nn.Linear(hidden_size+hidden_size, hidden_size) self.batch_norm3 = nn.BatchNorm1d(hidden_size) self.dropout3 = nn.Dropout(dropout_rate) self.dense4 = nn.Linear(hidden_size+hidden_size, hidden_size) self.batch_norm4 = nn.BatchNorm1d(hidden_size) self.dropout4 = nn.Dropout(dropout_rate) self.dense5 = nn.Linear(hidden_size+hidden_size, len(target_cols)) self.Relu = nn.ReLU(inplace=True) self.PReLU = nn.PReLU() self.LeakyReLU = nn.LeakyReLU(negative_slope=0.01, inplace=True) self.RReLU = nn.RReLU() def forward(self, x): x = self.batch_norm0(x) x = self.dropout0(x) x1 = self.dense1(x) x1 = self.batch_norm1(x1) x1 = self.LeakyReLU(x1) x1 = self.dropout1(x1) x = torch.cat([x, x1], 1) x2 = self.dense2(x) x2 = self.batch_norm2(x2) x2 = self.LeakyReLU(x2) x2 = self.dropout2(x2) x = torch.cat([x1, x2], 1) x3 = self.dense3(x) x3 = self.batch_norm3(x3) x3 = self.LeakyReLU(x3) x3 = self.dropout3(x3) x = torch.cat([x2, x3], 1) x4 = self.dense4(x) x4 = self.batch_norm4(x4) x4 = self.LeakyReLU(x4) x4 = self.dropout4(x4) x = torch.cat([x3, x4], 1) x = self.dense5(x) return x if True: device = torch.device("cuda:0") model_list = [] tmp = np.zeros(len(feat_cols)) for _fold in range(NFOLDS): torch.cuda.empty_cache() model = Model() model.to(device) model_weights = f"{CACHE_PATH}/online_model{_fold}.pth" model.load_state_dict(torch.load(model_weights)) model.eval() model_list.append(model )<choose_model_class>
data.show_batch(rows=3, figsize=(6,6))
Digit Recognizer
4,082,669
SEED = 1111 np.random.seed(SEED) def create_mlp(num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate): inp = tf.keras.layers.Input(shape=(num_columns,)) x = tf.keras.layers.BatchNormalization()(inp) x = tf.keras.layers.Dropout(dropout_rates[0])(x) for i in range(len(hidden_units)): x = tf.keras.layers.Dense(hidden_units[i])(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation(tf.keras.activations.swish)(x) x = tf.keras.layers.Dropout(dropout_rates[i + 1])(x) x = tf.keras.layers.Dense(num_labels)(x) out = tf.keras.layers.Activation("sigmoid")(x) model = tf.keras.models.Model(inputs=inp, outputs=out) model.compile(optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing), metrics=tf.keras.metrics.AUC(name="AUC")) return model epochs = 200 batch_size = 4096 hidden_units = [160, 160, 160] dropout_rates = [0.2, 0.2, 0.2, 0.2] label_smoothing = 1e-2 learning_rate = 1e-3 tf.keras.backend.clear_session() tf.random.set_seed(SEED) clf = create_mlp(len(feat_cols), 5, hidden_units, dropout_rates, label_smoothing, learning_rate) clf.load_weights('../input/jane-street-with-keras-nn-overfit/model.h5') tf_models = [clf]<statistical_test>
learn = cnn_learner(data, models.resnet50, metrics=accuracy, model_dir='/tmp/models') learn.lr_find()
Digit Recognizer
4,082,669
if True: env = janestreet.make_env() env_iter = env.iter_test() for(test_df, pred_df)in tqdm(env_iter): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, feat_cols].values if np.isnan(x_tt.sum()): x_tt = np.nan_to_num(x_tt)+ np.isnan(x_tt)* f_mean cross_41_42_43 = x_tt[:, 41] + x_tt[:, 42] + x_tt[:, 43] cross_1_2 = x_tt[:, 1] /(x_tt[:, 2] + 1e-5) feature_inp = np.concatenate(( x_tt, np.array(cross_41_42_43 ).reshape(x_tt.shape[0], 1), np.array(cross_1_2 ).reshape(x_tt.shape[0], 1), ), axis=1) torch_pred = np.zeros(( 1, len(target_cols))) for model in model_list: torch_pred += model(torch.tensor(feature_inp, dtype=torch.float ).to(device)).sigmoid().detach().cpu().numpy() / NFOLDS torch_pred = np.median(torch_pred) tf_pred = np.median(np.mean([model(x_tt, training = False ).numpy() for model in tf_models],axis=0)) pred = torch_pred * 0.5 + tf_pred * 0.5 pred_df.action = np.where(pred >= 0.4914, 1, 0 ).astype(int) else: pred_df.action = 0 env.predict(pred_df )<load_from_csv>
%time learn.fit(2,slice(1e-2))
Digit Recognizer
4,082,669
train = pd.read_csv('../input/jane-street-market-prediction/train.csv')<prepare_x_and_y>
learn.precompute=False learn.unfreeze()
Digit Recognizer
4,082,669
train = train.query('date > 85').reset_index(drop=True) train = train[train['weight'] != 0] features_mean = [] features = [c for c in train.columns if 'feature' in c] for i in features: x = train[i].mean() features_mean.append(x) train[i] = train[i].fillna(x) train['action'] = ((train['resp'].values) > 0).astype(int) resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] X_train = train.loc[:, train.columns.str.contains('feature')] y_train = np.stack([(train[c] > 0).astype('int') for c in resp_cols]).T<feature_engineering>
lr = np.array([0.001, 0.0075, 0.01] )
Digit Recognizer
4,082,669
f = np.median f_mean = np.mean(train[features[1:]].values, axis=0)<drop_column>
learn.fit_one_cycle(9,slice(2e-3,2e-5), wd=.1 )
Digit Recognizer
4,082,669
del train<define_search_space>
test_pred, test_y, test_loss = learn.get_preds(ds_type=DatasetType.Test, with_loss=True )
Digit Recognizer
4,082,669
epochs = 200 batch_size = 4096 hidden_units = [160, 160] dropout_rates = [0.20, 0.20, 0.20] label_smoothing = 1e-2 learning_rate = 1e-3<choose_model_class>
submission_df = pd.DataFrame({'ImageId': range(1, len(test_y)+ 1), 'Label': result}, columns=['ImageId', 'Label']) submission_df.head()
Digit Recognizer
4,082,669
def create_mlp(num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate): inp = tf.keras.layers.Input(shape=(num_columns,)) x = tf.keras.layers.BatchNormalization()(inp) x = tf.keras.layers.Dropout(dropout_rates[0] )(x) for i in range(len(hidden_units)) : x = tf.keras.layers.Dense(hidden_units[i] )(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation(tf.keras.activations.swish )(x) x = tf.keras.layers.Dropout(dropout_rates[i + 1] )(x) x = tf.keras.layers.Dense(num_labels )(x) out = tf.keras.layers.Activation("sigmoid" )(x) model = tf.keras.models.Model(inputs=inp, outputs=out) model.compile( optimizer = tfa.optimizers.RectifiedAdam(learning_rate=learning_rate), loss = tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing), metrics = tf.keras.metrics.AUC(name='AUC')) return model<train_model>
submission_df.to_csv("submission.csv",index=None )
Digit Recognizer
6,393,530
clf.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=2 )<drop_column>
import keras import numpy as np import pandas as pd
Digit Recognizer
6,393,530
del X_train del y_train<find_best_params>
train_df=pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test_df=pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
6,393,530
models = [] models.append(clf) th = 0.503<categorify>
target=train_df["label"] train_df.drop("label",axis=1,inplace=True )
Digit Recognizer
6,393,530
env = janestreet.make_env() for (test_df, pred_df) in tqdm(env.iter_test()): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, features].values if np.isnan(x_tt[:, 1:].sum()): x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:]) + np.isnan(x_tt[:, 1:]) * f_mean pred = np.mean([model(x_tt, training=False).numpy() for model in models], axis=0) pred = f(pred) pred_df.action = np.where(pred >= th, 1, 0).astype(int) else: pred_df.action = 0 env.predict(pred_df)<set_options>
train_df=train_df/255 test_df=test_df/255
Digit Recognizer
6,393,530
warnings.filterwarnings("ignore") <load_from_csv>
X_train=train_df.values.reshape(-1,28,28,1) test=test_df.values.reshape(-1,28,28,1 )
Digit Recognizer
6,393,530
%%time train = pd.read_csv('../input/jane-street-market-prediction/train.csv') train = train.astype({c: np.float32 for c in train.select_dtypes(include='float64').columns})<data_type_conversions>
y_train=to_categorical(target,num_classes=10 )
Digit Recognizer
6,393,530
train.fillna(train.mean(), inplace=True)<data_type_conversions>
X_train,X_test,y_train,y_test=train_test_split(X_train,y_train,test_size=0.10,random_state=42 )
Digit Recognizer
6,393,530
train['action'] = (train['resp'] > 0).astype('int')<define_variables>
batch_size=128 num_classes=10 epochs=20 inputshape=(28,28,1 )
Digit Recognizer
6,393,530
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp_4', 'resp']<split>
model=Sequential() model.add(Conv2D(32,kernel_size=(5,5),activation="relu",input_shape=inputshape)) model.add(Conv2D(64,(3,3),activation="relu")) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Conv2D(128,kernel_size=(5,5),activation="relu")) model.add(Conv2D(128,(3,3),activation="relu")) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256,activation="relu")) model.add(Dense(128,activation="relu")) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax'))
Digit Recognizer
6,393,530
features_train_data = train.iloc[:,7:137]<define_variables>
reduce_learning_rate = ReduceLROnPlateau(monitor = 'val_accuracy', patience = 3, verbose = 1, factor = 0.3, min_lr = 0.00001) checkpoint = ModelCheckpoint('save_weights.h5', monitor = 'val_accuracy', verbose = 1, save_best_only = True, mode = 'max') early_stopping = EarlyStopping(monitor = 'val_loss', min_delta = 1e-10, patience = 10, verbose = 1, restore_best_weights = True) callbacks = [reduce_learning_rate, checkpoint, early_stopping]
Digit Recognizer
6,393,530
all_drop_cols = set(high_correlations.index.get_level_values(0))<compute_train_metric>
model.fit(X_train,y_train,batch_size=batch_size,epochs=epochs,validation_data=(X_test,y_test),callbacks=callbacks) accuracy=model.evaluate(X_test,y_test )
Digit Recognizer
6,393,530
<prepare_x_and_y><EOS>
pred = model.predict_classes(test) res = pd.DataFrame({"ImageId":list(range(1,28001)) ,"Label":pred}) res.to_csv("output.csv", index = False )
Digit Recognizer
6,732,418
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
%matplotlib inline np.random.seed(17) sns.set(style='white', context='notebook', palette='deep') for dirname, _, filenames in os.walk('/kaggle/input/digit-recognizer'): for filename in filenames: print(os.path.join(dirname, filename))
Digit Recognizer
6,732,418
HIDDEN_LAYER_1 = [256, 256] HIDDEN_LAYER_2 = [160, 160, 160] HIDDEN_LAYER_3 = [128, 128, 128, 128] TARGET_NUM = 5 input = tf.keras.layers.Input(shape=(X_train.shape[1],)) x1 = tf.keras.layers.BatchNormalization()(input) x1 = tf.keras.layers.Dropout(0.25 )(x1) for units in HIDDEN_LAYER_1: x1 = tf.keras.layers.Dense(units )(x1) x1 = tf.keras.layers.BatchNormalization()(x1) x1 = tf.keras.layers.Activation(tf.keras.activations.swish )(x1) x1 = tf.keras.layers.Dropout(0.25 )(x1) x2 = tf.keras.layers.BatchNormalization()(input) x2 = tf.keras.layers.Dropout(0.25 )(x2) for units in HIDDEN_LAYER_2: x2 = tf.keras.layers.Dense(units )(x2) x2 = tf.keras.layers.BatchNormalization()(x2) x2 = tf.keras.layers.Activation(tf.keras.activations.swish )(x2) x2 = tf.keras.layers.Dropout(0.25 )(x2) x3 = tf.keras.layers.BatchNormalization()(input) x3 = tf.keras.layers.Dropout(0.25 )(x3) for units in HIDDEN_LAYER_3: x3 = tf.keras.layers.Dense(units )(x3) x3 = tf.keras.layers.BatchNormalization()(x3) x3 = tf.keras.layers.Activation(tf.keras.activations.swish )(x3) x3 = tf.keras.layers.Dropout(0.25 )(x3) x = tf.keras.layers.concatenate([x1, x2, x3]) x = tf.keras.layers.Dense(TARGET_NUM )(x) output = tf.keras.layers.Activation("sigmoid" )(x) model = tf.keras.models.Model(inputs=input, outputs=output) model.compile( optimizer = tfa.optimizers.RectifiedAdam(learning_rate=1e-3), metrics = tf.keras.metrics.AUC(name="AUC"), loss = tf.keras.losses.BinaryCrossentropy(label_smoothing=1e-2), )<train_model>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
6,732,418
history = model.fit( x = X_train, y = y_train, epochs=25, batch_size=4096, validation_data=(X_valid, y_valid), ) models = [] models.append(model )<find_best_model_class>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) sns.countplot(Y_train) plt.show()
Digit Recognizer
6,732,418
THRESHOLD = 0.502 janestreet.make_env.__called__ = False env = janestreet.make_env() print('predicting...') for (test_df, pred_df) in tqdm(env.iter_test()): if test_df['weight'].item() > 0: X_test = test_df.loc[:, features].values if np.isnan(X_test.sum()): X_test = np.nan_to_num(X_test) + np.isnan(X_test) * f_mean.values pred = model(X_test, training=False).numpy() pred = np.mean(pred) pred_df.action = np.where(pred >= THRESHOLD, 1, 0).astype(int) else: pred_df.action = 0 env.predict(pred_df)<load_from_csv>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
6,732,418
SEED = 1111 np.random.seed(SEED) train = pd.read_csv('../input/jane-street-market-prediction/train.csv') cols_to_remove = ['feature_26', 'feature_36', 'feature_24', 'feature_34', 'feature_12', 'feature_22', 'feature_32', 'feature_8', 'feature_18', 'feature_28', 'feature_108', 'feature_114', 'feature_101', 'feature_113', 'feature_107', 'feature_119', 'feature_4', 'feature_6', 'feature_15', 'feature_23', 'feature_25', 'feature_35', 'feature_33', 'feature_38', 'feature_40', 'feature_61', 'feature_66', 'feature_63', 'feature_68', 'feature_76', 'feature_88', 'feature_100', 'feature_112', 'feature_125', 'feature_129', 'feature_82', 'feature_106', 'feature_48', 'feature_57', 'feature_122', 'feature_126', 'feature_128', 'feature_124'] for col in cols_to_remove: del train[col] print(train)<prepare_x_and_y>
X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1 )
Digit Recognizer
6,732,418
train = train.query('date > 85' ).reset_index(drop = True) train = train[train['weight'] != 0] train.fillna(train.mean() ,inplace=True) train['action'] =(( train['resp'].values)> 0 ).astype(int) features = [c for c in train.columns if "feature" in c] f_mean = np.mean(train[features[1:]].values,axis=0) resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] X_train = train.loc[:, train.columns.str.contains('feature')] y_train = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T def create_mlp( num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate ): inp = tf.keras.layers.Input(shape=(num_columns,)) x = tf.keras.layers.BatchNormalization()(inp) x = tf.keras.layers.Dropout(dropout_rates[0] )(x) for i in range(len(hidden_units)) : x = tf.keras.layers.Dense(hidden_units[i] )(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation(tf.keras.activations.swish )(x) x = tf.keras.layers.Dropout(dropout_rates[i + 1] )(x) x = tf.keras.layers.Dense(num_labels )(x) out = tf.keras.layers.Activation("sigmoid" )(x) model = tf.keras.models.Model(inputs=inp, outputs=out) model.compile( optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing), metrics=tf.keras.metrics.AUC(name="AUC"), ) return model epochs = 200 batch_size = 4096 hidden_units = [160, 160, 160] dropout_rates = [0.2, 0.2, 0.2, 0.2] label_smoothing = 1e-2 learning_rate = 1e-3 tf.keras.backend.clear_session() tf.random.set_seed(SEED) clf = create_mlp( len(features), 5, hidden_units, dropout_rates, label_smoothing, learning_rate ) clf.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=2) clf.save(f'model.h5') th = 0.502 models = [clf] f = np.median env = janestreet.make_env() for(test_df, pred_df)in tqdm(env.iter_test()): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, features].values if np.isnan(x_tt[:, 1:].sum()): x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* f_mean pred = np.mean([model(x_tt, training = False ).numpy() for model in models],axis=0) pred = f(pred) pred_df.action = np.where(pred >= th, 1, 0 ).astype(int) else: pred_df.action = 0 env.predict(pred_df )<train_model>
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
6,732,418
Image(".. /input/tf-model-garden-official-models/TF.png" )<import_modules>
random_seed = 2
Digit Recognizer
6,732,418
import tensorflow as tf from tensorflow.keras.layers import Input, Dense, BatchNormalization, Dropout, Concatenate, Lambda, GaussianNoise, Activation from tensorflow.keras.models import Model, Sequential from tensorflow.keras.losses import BinaryCrossentropy from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from tensorflow.keras.layers.experimental.preprocessing import Normalization import tensorflow_addons as tfa from sklearn.model_selection import StratifiedKFold import numpy as np import pandas as pd from tqdm import tqdm import seaborn as sns import matplotlib.pyplot as plt<load_from_csv>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed )
Digit Recognizer
6,732,418
NFOLDS = 5 train_all = pd.read_csv('../input/jane-street-market-prediction/train.csv') train_all = train_all[train_all.date > 85].reset_index(drop=True) train_all = train_all[train_all['weight'] != 0] train_all.fillna(train_all.mean(), inplace=True)<prepare_x_and_y>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax")) model.summary()
Digit Recognizer
6,732,418
train_all['date_bin'] =(pd.qcut(train_all['date'], q=4, labels=False)+1)*train_all['feature_0'] features = [c for c in train_all.columns if "feature" in c] f_mean = np.mean(train_all[features[1:]].values,axis=0) resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] X_train = train_all.loc[:, train_all.columns.str.contains('feature|date_bin')] y_train = pd.DataFrame(np.stack([(train_all[c] > 0 ).astype('int')for c in resp_cols] ).T, columns = resp_cols )<split>
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True) Image("model.png" )
Digit Recognizer
6,732,418
skf = StratifiedKFold(n_splits=NFOLDS, shuffle = True, random_state = 42) result = next(skf.split(X_train, X_train.date_bin), None) train = train_all.iloc[result[0]].reset_index(drop=True) valid = train_all.iloc[result[1]].reset_index(drop=True )<drop_column>
model.compile(optimizer = 'nadam' , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
6,732,418
del train, valid, train_all, result<choose_model_class>
callbacks_list = [ ReduceLROnPlateau( monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=1e-05), ModelCheckpoint( filepath='MNIST_CNN_model.h5', monitor='val_accuracy', save_best_only=True )]
Digit Recognizer
6,732,418
MNAME = 'model' def get_callbacks(idx): mc = ModelCheckpoint(MNAME+"-{}.h5".format(idx), save_best_only=True) rp = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.00001) es = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=False) return [mc, rp, es] def create_dnn(num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate): inp = tf.keras.layers.Input(shape=(num_columns,)) x = tf.keras.layers.BatchNormalization()(inp) x = tf.keras.layers.Dropout(dropout_rates[0] )(x) for i in range(len(hidden_units)) : x = tf.keras.layers.Dense(hidden_units[i] )(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation(tf.keras.activations.swish )(x) x = tf.keras.layers.Dropout(dropout_rates[i + 1] )(x) x = tf.keras.layers.Dense(num_labels )(x) out = tf.keras.layers.Activation("sigmoid" )(x) model = tf.keras.models.Model(inputs=inp, outputs=out) return model<prepare_x_and_y>
epochs = 50 batch_size = 512
Digit Recognizer
6,732,418
skf = StratifiedKFold(n_splits=NFOLDS, shuffle = True, random_state = 42) history = [] for i in range(NFOLDS): print('fold {}'.format(i)) result = next(skf.split(X_train, X_train.date_bin), None) X_tr = X_train.iloc[result[0]].reset_index(drop=True) X_tr.drop(labels='date_bin', axis = 1, inplace=True) y_tr = y_train.iloc[result[0]].reset_index(drop=True) X_val = X_train.iloc[result[1]].reset_index(drop=True) X_val.drop(labels='date_bin', axis=1, inplace=True) y_val = y_train.iloc[result[1]].reset_index(drop=True) np.random.seed(42*i) tf.keras.backend.clear_session() tf.random.set_seed(42*i) batch_size = 8192 hidden_units = [160, 160, 160] dropout_rates = [0.2, 0.2, 0.2, 0.2] label_smoothing = 1e-2 learning_rate = 1e-3 clf = create_dnn(len(features), y_train.shape[1], hidden_units, dropout_rates, label_smoothing, learning_rate) clf.compile( optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing), metrics=tf.keras.metrics.AUC(name="AUC"), ) callbacks = get_callbacks(i) epochs = 200 history.append(clf.fit(X_tr, y_tr, epochs=epochs, batch_size=batch_size, validation_data=(X_val,y_val), callbacks=callbacks, verbose=0)) del clf, X_tr, y_tr, X_val, y_val, result<set_options>
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False)
datagen.fit(X_train)
Digit Recognizer
6,732,418
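To eyeball what the augmentation above actually does, one batch can be pulled from datagen.flow and plotted; a sketch assuming matplotlib is available and X_train/Y_train are already shaped (n, 28, 28, 1):
import matplotlib.pyplot as plt

aug_batch, _ = next(datagen.flow(X_train, Y_train, batch_size=9))
fig, axes = plt.subplots(3, 3, figsize=(4, 4))
for img, ax in zip(aug_batch, axes.ravel()):
    ax.imshow(img.squeeze(), cmap='gray')  # randomly rotated/shifted/zoomed digit
    ax.axis('off')
plt.show()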
env = janestreet.make_env()<correct_missing_values>
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                              epochs=epochs,
                              validation_data=(X_val, Y_val),
                              verbose=2,
                              steps_per_epoch=X_train.shape[0] // batch_size,
                              callbacks=callbacks_list)
Digit Recognizer
6,732,418
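fit_generator is deprecated in TensorFlow 2.x, where Model.fit accepts the same generator directly; an equivalent call under that assumption, with the same names as above, would be:
history = model.fit(
    datagen.flow(X_train, Y_train, batch_size=batch_size),
    epochs=epochs,
    validation_data=(X_val, Y_val),
    steps_per_epoch=X_train.shape[0] // batch_size,
    callbacks=callbacks_list,
    verbose=2,
)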
from numba import njit

@njit
def fillna_npwhere_njit(array, values):
    if np.isnan(array.sum()):
        array = np.where(np.isnan(array), values, array)
    return array<load_pretrained>
model = load_model('MNIST_CNN_model.h5')
Digit Recognizer
6,732,418
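A quick usage check of the numba helper above, with a hypothetical feature row containing NaNs and per-column means (the first call also triggers JIT compilation):
import numpy as np

row = np.array([1.0, np.nan, 3.0, np.nan])
col_means = np.array([0.0, 2.0, 0.0, 4.0])
print(fillna_npwhere_njit(row, col_means))  # [1. 2. 3. 4.]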
th = 0.501
clf0 = tf.keras.models.load_model("model-0.h5")
clf2 = tf.keras.models.load_model("model-2.h5")
clf4 = tf.keras.models.load_model("model-4.h5")
models = [clf0, clf2, clf4]
test_df_columns = ['weight'] + [f'feature_{i}' for i in range(130)] + ['date']
index_features = [n for n, col in enumerate(test_df_columns) if col in features]
for (test_df, pred_df) in tqdm(env.iter_test()):
    if test_df['weight'].values[0] > 0:
        x_tt = test_df.values[0][index_features].reshape(1, -1)
        x_tt[:, 1:] = fillna_npwhere_njit(x_tt[:, 1:][0], f_mean)
        pred = np.median(np.mean([model(x_tt, training=False).numpy() for model in models], axis=0))
        pred_df.action = int(pred >= th)
    else:
        pred_df.action = 0
    env.predict(pred_df)<set_options>
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
Digit Recognizer
6,732,418
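The ensembling in the inference loop above first averages the three models' five sigmoid outputs, then takes the median across the five response horizons; a toy numeric version of that reduction, with made-up probabilities:
import numpy as np

# shape (n_models, 1, 5): one row, five resp-horizon probabilities per model
preds = np.array([[[0.6, 0.4, 0.7, 0.5, 0.55]],
                  [[0.5, 0.5, 0.6, 0.6, 0.45]],
                  [[0.7, 0.3, 0.8, 0.4, 0.50]]])
pred = np.median(np.mean(preds, axis=0))  # mean over models, median over horizons
print(pred, int(pred >= 0.501))  # 0.5 0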
<load_from_csv><EOS>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)
Digit Recognizer
7,429,783
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import keras
import tensorflow
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization  # BatchNormalization added: the model below uses it
from keras.utils.np_utils import to_categorical
from keras.datasets import mnist
Digit Recognizer
7,429,783
class UtilityScoreCallback(tf.keras.callbacks.Callback):
    def __init__(self, X, date, weight, resp, batch_size=1024, early_stopping_patience=30,
                 plateau_patience=10, min_lr=1e-6, reduction_rate=0.3,
                 stage='train', fold_n=0, verbose=1):
        super().__init__()
        self.X = X
        self.date = date
        self.weight = weight
        self.resp = resp
        self.history = []
        self.batch_size = batch_size
        self.early_stopping_patience = early_stopping_patience
        self.plateau_patience = plateau_patience
        self.min_lr = min_lr
        self.reduction_rate = reduction_rate
        self.fold_n = fold_n
        self.stage = stage
        self.verbose = verbose
        self.best_uscore = -float('inf')
        self.checkpoints_path = 'models_1.0/'
        if not os.path.exists(self.checkpoints_path):
            os.makedirs(self.checkpoints_path)

    def utility_score(self, d, w, r, a):
        Pi = np.bincount(d, w * r * a)
        t = np.sum(Pi) / np.sqrt(np.sum(Pi ** 2)) * np.sqrt(250 / len(Pi))
        u = min(max(t, 0), 6) * np.sum(Pi)
        return u

    def is_patience_lost(self, patience):
        if len(self.history) > patience:
            best_performance = max(self.history[-(patience + 1):-1])
            return best_performance == self.history[-(patience + 1)] and best_performance >= self.history[-1]

    def early_stopping_check(self, uscore):
        if self.is_patience_lost(self.early_stopping_patience):
            self.model.stop_training = True

    def model_checkpoint(self, uscore, epoch):
        if uscore > self.best_uscore:
            # glob pattern made to match the saved file name so old checkpoints are actually removed
            for checkpoint in glob.glob(os.path.join(self.checkpoints_path, f'ResnetModel_{self.fold_n}*')):
                os.remove(checkpoint)
            self.best_uscore = uscore
            self.model.save(os.path.join(self.checkpoints_path, f'ResnetModel_{self.fold_n}.hdf5'))
            if self.verbose:
                print(f"Saved new checkpoint with score {self.best_uscore}")

    def reduce_lr_on_plateau(self):
        if self.is_patience_lost(self.plateau_patience):
            new_lr = max(float(K.get_value(self.model.optimizer.lr)) * self.reduction_rate, self.min_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose:
                print(f"Reduced learning rate to {new_lr}.")

    def on_epoch_end(self, epoch, logs={}):
        y_preds = self.model.predict(self.X, batch_size=self.batch_size)
        action_proba = np.median(y_preds, axis=1)
        action = np.where(action_proba >= 0.5, 1, 0)
        uscore = self.utility_score(self.date, self.weight, self.resp, action)
        self.history.append(uscore)
        if self.stage == 'val':
            self.early_stopping_check(uscore)
            self.model_checkpoint(uscore, epoch)
            if self.plateau_patience >= 0:
                self.reduce_lr_on_plateau()

    def get_uscore_history(self):
        return self.history<choose_model_class>
df_train = pd.read_csv('../input/digit-recognizer/train.csv')
df_test = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
7,429,783
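The utility_score method in the callback above implements the Jane Street competition metric; a standalone sketch with tiny synthetic inputs to show the reduction (dates d, weights w, responses r, binary actions a):
import numpy as np

def utility_score(d, w, r, a):
    Pi = np.bincount(d, w * r * a)                      # per-date sum of weight*resp*action
    t = np.sum(Pi) / np.sqrt(np.sum(Pi ** 2)) * np.sqrt(250 / len(Pi))
    return min(max(t, 0), 6) * np.sum(Pi)

d = np.array([0, 0, 1, 1])            # two dates, two trades each
w = np.array([1.0, 2.0, 1.0, 1.0])
r = np.array([0.1, -0.05, 0.2, 0.0])
a = np.array([1, 0, 1, 1])
print(utility_score(d, w, r, a))      # ~1.8: t exceeds the clip, so u = 6 * sum(Pi)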
def create_resnet(num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate):
    inp = tf.keras.layers.Input(shape=(num_columns,))
    x = tf.keras.layers.BatchNormalization()(inp)
    x = tf.keras.layers.Dropout(dropout_rates[0])(x)
    for i in range(len(hidden_units)):
        x = tf.keras.layers.Dense(hidden_units[i])(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation(tf.keras.activations.swish)(x)
        x = tf.keras.layers.Dropout(dropout_rates[i + 1])(x)
    x_1 = tf.keras.layers.Concatenate(axis=-1)([x, inp])
    for i in range(len(hidden_units)):
        x_1 = tf.keras.layers.Dense(hidden_units[i])(x_1)
        x_1 = tf.keras.layers.BatchNormalization()(x_1)
        x_1 = tf.keras.layers.Activation(tf.keras.activations.swish)(x_1)
        x_1 = tf.keras.layers.Dropout(dropout_rates[i + 1])(x_1)
    x_2 = tf.keras.layers.Concatenate(axis=-1)([x_1, x])
    for i in range(len(hidden_units)):
        x_2 = tf.keras.layers.Dense(hidden_units[i])(x_2)
        x_2 = tf.keras.layers.BatchNormalization()(x_2)
        x_2 = tf.keras.layers.Activation(tf.keras.activations.swish)(x_2)
        x_2 = tf.keras.layers.Dropout(dropout_rates[i + 1])(x_2)
    x_3 = tf.keras.layers.Concatenate(axis=-1)([x_1, x_2])
    x_3 = tf.keras.layers.Dense(256)(x_3)
    x_3 = tf.keras.layers.BatchNormalization()(x_3)
    x_3 = tf.keras.layers.Activation(tf.keras.activations.swish)(x_3)
    x_3 = tf.keras.layers.Dropout(0.2)(x_3)
    x_3 = tf.keras.layers.Dense(num_labels)(x_3)
    out = tf.keras.layers.Activation("sigmoid")(x_3)
    model = tf.keras.models.Model(inputs=inp, outputs=out)
    model.compile(
        optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate),
        loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
        metrics=tf.keras.metrics.AUC(name="AUC"),
    )
    return model<define_search_space>
print(type(df_test))
print(type(df_train))
df_test.isnull().sum()
df_train.isnull().sum()
df_test.isnull().sum().sum()
df_train.isnull().sum().sum()
Digit Recognizer
7,429,783
batch_size = 2048
hidden_units = [150, 150, 150]
dropout_rates = [0.25, 0.25, 0.25, 0.25]
label_smoothing = 1e-3
learning_rate = 1e-3
folds = 5
train_mode = True
opt_th_cross = 0.5<train_model>
classifier = Sequential()
classifier.add(Convolution2D(filters=128, kernel_size=(5, 5), padding='Same', activation='relu', input_shape=(28, 28, 1)))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(BatchNormalization())
classifier.add(Convolution2D(filters=128, kernel_size=(5, 5), padding='Same', activation='relu'))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Dropout(0.25))
Digit Recognizer
7,429,783
if train_mode:
    clf = create_resnet(len(features), 5, hidden_units, dropout_rates, label_smoothing, learning_rate)
    clf.fit(train.loc[:, features].values, (train.loc[:, resp_cols] > 0).astype(int),
            epochs=150, batch_size=batch_size, shuffle=True)<categorify>
classifier.add(Convolution2D(filters=256, kernel_size=(3, 3), padding='Same', activation='relu'))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Convolution2D(filters=256, kernel_size=(3, 3), padding='Same', activation='relu'))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
classifier.add(Dropout(0.25))
Digit Recognizer
7,429,783
models = []
clf.call = tf.function(clf.call, experimental_relax_shapes=True)
models.append(clf)<split>
classifier.add(Flatten())
classifier.add(Dense(256, activation="relu"))
classifier.add(Dropout(0.3))
classifier.add(Dense(10, activation="softmax"))
Digit Recognizer
7,429,783
env = janestreet.make_env()
env_iter = env.iter_test()<feature_engineering>
classifier.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
Digit Recognizer
7,429,783
for (test_df, pred_df) in tqdm(env_iter):
    if test_df['weight'].values[0] > 0:
        test_df = test_df.loc[:, features].values
        if np.isnan(test_df[:, 1:].sum()):
            test_df[:, 1:] = np.nan_to_num(test_df[:, 1:]) + np.isnan(test_df[:, 1:]) * f_mean
        pred = np.mean([model(test_df, training=False).numpy() for model in models], axis=0)
        pred_df.action = int(0.6 * pred[:, 3] + 0.2 * pred[:, 2] + 0.2 * pred[:, 4] > opt_th_cross)
    else:
        pred_df["action"].values[0] = 0
    env.predict(pred_df)<set_options>
classifier.fit(target, label_cat, epochs=50)  # target and label_cat are prepared in cells not shown here
Digit Recognizer
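The final action in the Jane Street loop above is a weighted blend of three of the five response heads; with resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'], pred[:, 3] is resp, pred[:, 2] is resp_3, and pred[:, 4] is resp_4. A toy version of the blend-and-threshold step, with made-up probabilities:
import numpy as np

pred = np.array([[0.55, 0.48, 0.62, 0.58, 0.51]])  # columns follow the resp_cols order
blend = 0.6 * pred[:, 3] + 0.2 * pred[:, 2] + 0.2 * pred[:, 4]
print(blend, int(blend[0] > 0.5))  # 0.6*0.58 + 0.2*0.62 + 0.2*0.51 = 0.574 -> action 1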