kernel_id: int64 (24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
6,632,007
submission = pd.DataFrame({ "PassengerId": test_df["PassengerId"], "Survived": Y_pred }) submission.to_csv('submission.csv', index=False )<set_options>
def kaggle_acc(y_true, y_pred0, pix=0.5, area=24000, dim=(352,544)) : y_pred = K.cast(K.greater(y_pred0,pix), K.floatx()) s = K.sum(y_pred, axis=(1,2)) s = K.cast(K.greater(s, area), K.floatx()) s = K.reshape(s,(-1,1)) s = K.repeat(s,dim[0]*dim[1]) s = K.reshape(s,(-1,1)) y_pred = K.permute_dimensions(y_pred,(0,3,1,2)) y_pred = K.reshape(y_pred,shape=(-1,1)) y_pred = s*y_pred y_pred = K.reshape(y_pred,(-1,y_pred0.shape[3],dim[0],dim[1])) y_pred = K.permute_dimensions(y_pred,(0,2,3,1)) total_y_true = K.sum(y_true, axis=(1,2)) total_y_true = K.cast(K.greater(total_y_true, 0), K.floatx()) total_y_pred = K.sum(y_pred, axis=(1,2)) total_y_pred = K.cast(K.greater(total_y_pred, 0), K.floatx()) return 1 - K.mean(K.abs(total_y_pred - total_y_true)) def kaggle_dice(y_true, y_pred0, pix=0.5, area=24000, dim=(352,544)) : y_pred = K.cast(K.greater(y_pred0,pix), K.floatx()) s = K.sum(y_pred, axis=(1,2)) s = K.cast(K.greater(s, area), K.floatx()) s = K.reshape(s,(-1,1)) s = K.repeat(s,dim[0]*dim[1]) s = K.reshape(s,(-1,1)) y_pred = K.permute_dimensions(y_pred,(0,3,1,2)) y_pred = K.reshape(y_pred,shape=(-1,1)) y_pred = s*y_pred y_pred = K.reshape(y_pred,(-1,y_pred0.shape[3],dim[0],dim[1])) y_pred = K.permute_dimensions(y_pred,(0,2,3,1)) intersection = K.sum(y_true * y_pred, axis=(1,2)) total_y_true = K.sum(y_true, axis=(1,2)) total_y_pred = K.sum(y_pred, axis=(1,2)) return K.mean(( 2*intersection+1e-9)/(total_y_true+total_y_pred+1e-9))
Understanding Clouds from Satellite Images
6,632,007
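For reference: the kaggle_dice and kaggle_acc Keras metrics above approximate the competition's Dice score after zeroing out small predicted masks. A minimal NumPy sketch of the plain Dice coefficient on a pair of binary masks is shown below (the function name and epsilon are illustrative assumptions, not part of the notebook):
import numpy as np

def dice_coefficient(y_true, y_pred, eps=1e-9):
    # Dice = 2 * |A intersection B| / (|A| + |B|) for binary masks A and B.
    y_true = y_true.astype(bool)
    y_pred = y_pred.astype(bool)
    intersection = np.logical_and(y_true, y_pred).sum()
    return (2.0 * intersection + eps) / (y_true.sum() + y_pred.sum() + eps)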
warnings.filterwarnings('ignore' )<load_from_csv>
filters = [256, 128, 64, 32, 16] REDUCTION = 0; RED = 2**REDUCTION filters = filters[:5-REDUCTION] BATCH_SIZE = 16 jaccard_loss = sm.losses.JaccardLoss() skf = KFold(n_splits=3, shuffle=True, random_state=RAND) for k,(idxT0, idxV0)in enumerate(skf.split(train2)) : train_idx = train2.index[idxT0] val_idx = train2.index[idxV0] if k==0: idx_oof_0 = val_idx.copy() elif k==1: idx_oof_1 = val_idx.copy() elif k==2: idx_oof_2 = val_idx.copy() print('#'*25) print('### FOLD',k+1) print('#'*25) if not DO_TRAIN: continue train_generator = DataGenerator2( train_idx, flips=True, augment=True, shuffle=True, shrink2=RED, batch_size=BATCH_SIZE, ) val_generator = DataGenerator2( val_idx, shrink2=RED, batch_size=BATCH_SIZE ) opt = AdamAccumulate(lr=0.001, accum_iters=8) model = sm.Unet( 'efficientnetb2', classes=4, encoder_weights='imagenet', decoder_filters = filters, input_shape=(None, None, 3), activation='sigmoid' ) model.compile(optimizer=opt, loss=jaccard_loss, metrics=[dice_coef,kaggle_dice,kaggle_acc]) checkpoint = ModelCheckpoint('model_'+str(k)+'.h5', save_best_only=True) es = EarlyStopping(monitor='val_dice_coef', min_delta=0.001, patience=5, verbose=1, mode='max') rlr = ReduceLROnPlateau(monitor='val_dice_coef', factor=0.5, patience=2, verbose=1, mode='max', min_delta=0.001) history = model.fit_generator( train_generator, validation_data=val_generator, callbacks=[rlr, es, checkpoint], epochs=30, verbose=2, workers=2 ) history_df = pd.DataFrame(history.history) history_df.to_csv('history_'+str(k)+'.csv', index=False) del train_idx, val_idx, train_generator, val_generator, opt, model, checkpoint, es, rlr, history, history_df K.clear_session() ; x=gc.collect()
Understanding Clouds from Satellite Images
6,632,007
raw_data = pd.read_csv("../input/train.csv") raw_test = pd.read_csv('../input/test.csv' )<compute_test_metric>
!pip install tta-wrapper --quiet if DO_TEST: model1 = load_model('model_0.h5',custom_objects={'dice_coef':dice_coef, 'jaccard_loss':jaccard_loss,'AdamAccumulate':AdamAccumulate, 'kaggle_dice':kaggle_dice,'kaggle_acc':kaggle_acc}) if USE_TTA: model1 = tta_segmentation(model1, h_flip=True, h_shift=(-10, 10), v_flip=True, v_shift=(-10, 10), merge='mean') model2 = load_model('model_1.h5',custom_objects={'dice_coef':dice_coef, 'jaccard_loss':jaccard_loss,'AdamAccumulate':AdamAccumulate, 'kaggle_dice':kaggle_dice,'kaggle_acc':kaggle_acc}) if USE_TTA: model2 = tta_segmentation(model2, h_flip=True, h_shift=(-10, 10), v_flip=True, v_shift=(-10, 10), merge='mean') model3 = load_model('model_2.h5',custom_objects={'dice_coef':dice_coef, 'jaccard_loss':jaccard_loss,'AdamAccumulate':AdamAccumulate, 'kaggle_dice':kaggle_dice,'kaggle_acc':kaggle_acc}) if USE_TTA: model3 = tta_segmentation(model3, h_flip=True, h_shift=(-10, 10), v_flip=True, v_shift=(-10, 10), merge='mean' )
Understanding Clouds from Satellite Images
6,632,007
<count_missing_values><EOS>
print('Computing masks for',len(sub)//4,'test images with 3 models'); sub.EncodedPixels = '' PTH = '../input/cloud-images-resized/test_images_384x576/'; bs = 4 if USE_TTA: bs=1 test_gen = DataGenerator2(sub.Image[::4].values, width=576, height=384, batch_size=bs, mode='predict',path=PTH) sz = 20000.*(576/525)*(384/350)/RED/RED pixt = [0.5,0.5,0.5,0.35] szt = [25000., 20000., 22500., 15000.] for k in range(len(szt)) : szt[k] = szt[k]*(576./525.) *(384./350.) /RED/RED if DO_TEST: for b,batch in enumerate(test_gen): btc = model1.predict_on_batch(batch) btc += model2.predict_on_batch(batch) btc += model3.predict_on_batch(batch) btc /= 3.0 for j in range(btc.shape[0]): for i in range(btc.shape[-1]): mask =(btc[j,:,:,i]>pixt[i] ).astype(int); rle = '' if np.sum(mask)>szt[i]: rle = mask2rleXXX(mask ,shape=(576//RED,384//RED)) sub.iloc[4*(bs*b+j)+i,1] = rle if b%(100//bs)==0: print(b*bs,', ',end='') t = np.round(( time.time() - kernel_start)/60,1) if t > LIMIT*60: print('Time limit reached'); break sub[['Image_Label','EncodedPixels']].to_csv('sub_seg.csv',index=False) sub.loc[(sub.p<0.5)&(sub.Label=='Fish'),'EncodedPixels'] = '' sub.loc[(sub.p<0.3)&(sub.Label=='Flower'),'EncodedPixels'] = '' sub.loc[(sub.p<0.5)&(sub.Label=='Gravel'),'EncodedPixels'] = '' sub.loc[(sub.p<0.5)&(sub.Label=='Sugar'),'EncodedPixels'] = '' sub[['Image_Label','EncodedPixels']].to_csv('submission.csv',index=False) sub.head(10 )
Understanding Clouds from Satellite Images
5,920,350
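mask2rleXXX is called in the prediction loop above but is not defined in this excerpt. A minimal sketch of the standard Kaggle run-length encoding it presumably implements follows (column-major flattening, 1-indexed starts; the name mask_to_rle and the exact signature are assumptions):
import numpy as np

def mask_to_rle(mask):
    # Flatten column-major, then emit "start length" pairs with 1-indexed starts.
    pixels = mask.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)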
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<data_type_conversions>
! pip install git+https://github.com/qubvel/classification_models.git
Understanding Clouds from Satellite Images
5,920,350
raw_data['IsAgeNull'] = np.where(np.isnan(raw_data['Age']), 1, 0) raw_data['Age'].fillna(( raw_data['Age'].mean()), inplace=True) raw_data['Age'] = raw_data['Age'].round().astype(int )<feature_engineering>
seed(10) set_random_seed(10) %matplotlib inline
Understanding Clouds from Satellite Images
5,920,350
raw_data['AgeLabel'] = pd.cut(raw_data['Age'], bins=np.arange(start=0, stop=90, step=10), labels=np.arange(start=0, stop=8, step=1), include_lowest=True )<feature_engineering>
from classification_models.keras import Classifiers
Understanding Clouds from Satellite Images
5,920,350
raw_data['FareLabel'] = pd.cut(raw_data.Fare, bins=np.arange(start=0, stop=600, step=50), precision=0, labels=np.arange(start=0, stop=11, step=1), include_lowest=True )<categorify>
test_imgs_folder = '../input/understanding_cloud_organization/test_images/' train_imgs_folder = '../input/understanding_cloud_organization/train_images/' num_cores = multiprocessing.cpu_count()
Understanding Clouds from Satellite Images
5,920,350
raw_data = pd.concat([raw_data, pd.get_dummies(raw_data['Sex'])], axis=1 )<categorify>
train_df = pd.read_csv('../input/understanding_cloud_organization/train.csv') train_df.head()
Understanding Clouds from Satellite Images
5,920,350
raw_data = pd.concat([raw_data, pd.get_dummies(raw_data['Title'], prefix='title')], axis=1 )<categorify>
train_df = train_df[~train_df['EncodedPixels'].isnull() ] train_df['Image'] = train_df['Image_Label'].map(lambda x: x.split('_')[0]) train_df['Class'] = train_df['Image_Label'].map(lambda x: x.split('_')[1]) classes = train_df['Class'].unique() train_df = train_df.groupby('Image')['Class'].agg(set ).reset_index() for class_name in classes: train_df[class_name] = train_df['Class'].map(lambda x: 1 if class_name in x else 0) train_df.head()
Understanding Clouds from Satellite Images
5,920,350
raw_data = pd.concat([raw_data, pd.get_dummies(raw_data['Pclass'], prefix='Pclass')], axis=1 )<prepare_x_and_y>
img_2_ohe_vector = {img:vec for img, vec in zip(train_df['Image'], train_df.iloc[:, 2:].values)}
Understanding Clouds from Satellite Images
5,920,350
need_columns = [ 'female', 'male', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'title_Master', 'title_Miss', 'title_Mr', 'title_Mrs', 'title_Rare', 'AgeLabel', 'IsAgeNull', 'IsChildren', 'FareLabel', 'SibSp', 'Parch', 'FamilySize', 'NoFamily', 'SmallFamily', 'MediumFamily', 'LargeFamily', 'LowFare', 'HighFare', 'MediumFare', 'NoCabin' ] data = raw_data[need_columns] x = data y = raw_data.Survived <define_search_space>
albumentations_train = Compose([ VerticalFlip() , HorizontalFlip() , Rotate(limit=30), GridDistortion() ], p=1 )
Understanding Clouds from Satellite Images
5,920,350
n_estimators = [int(x)for x in np.linspace(start = 100, stop = 2000, num = 20)] max_features = ['auto', 'sqrt', 'log2', None] max_depth = [int(x)for x in np.linspace(10, 200, num = 20)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2, 4] bootstrap = [True, False] max_leaf_nodes = [2, 5, 8, 10, None] criterion=['gini', 'entropy'] random_grid = { 'n_estimators': n_estimators, 'criterion': criterion, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap, 'max_leaf_nodes': max_leaf_nodes } estimator = RandomForestClassifier()<train_on_grid>
train_imgs, val_imgs = train_test_split(train_df['Image'].values, test_size=0.2, stratify=train_df['Class'].map(lambda x: str(sorted(list(x)))) , random_state=10 )
Understanding Clouds from Satellite Images
5,920,350
rf_random = RandomizedSearchCV( estimator=estimator, param_distributions=random_grid, random_state=42, n_jobs=-1 ) rf_random.fit(x, y )<find_best_params>
class DataGenenerator(Sequence): def __init__(self, images_list=None, folder_imgs=train_imgs_folder, batch_size=8, shuffle=True, augmentation=None, resized_height=224, resized_width=224, num_channels=3): self.batch_size = batch_size self.shuffle = shuffle self.augmentation = augmentation if images_list is None: self.images_list = os.listdir(folder_imgs) else: self.images_list = deepcopy(images_list) self.folder_imgs = folder_imgs self.len = len(self.images_list)// self.batch_size self.resized_height = resized_height self.resized_width = resized_width self.num_channels = num_channels self.num_classes = 4 self.is_test = not 'train' in folder_imgs if not shuffle and not self.is_test: self.labels = [img_2_ohe_vector[img] for img in self.images_list[:self.len*self.batch_size]] def __len__(self): return self.len def on_epoch_start(self): if self.shuffle: random.shuffle(self.images_list) def __getitem__(self, idx): current_batch = self.images_list[idx * self.batch_size:(idx + 1)* self.batch_size] X = np.empty(( self.batch_size, self.resized_height, self.resized_width, self.num_channels)) y = np.empty(( self.batch_size, self.num_classes)) for i, image_name in enumerate(current_batch): path = os.path.join(self.folder_imgs, image_name) img = cv2.resize(cv2.imread(path),(self.resized_height, self.resized_width)).astype(np.float32) if not self.augmentation is None: augmented = self.augmentation(image=img) img = augmented['image'] X[i, :, :, :] = img/255.0 if not self.is_test: y[i, :] = img_2_ohe_vector[image_name] return X, y def get_labels(self): if self.shuffle: images_current = self.images_list[:self.len*self.batch_size] labels = [img_2_ohe_vector[img] for img in images_current] else: labels = self.labels return np.array(labels )
Understanding Clouds from Satellite Images
5,920,350
best_params = rf_random.best_params_ print(best_params )<find_best_score>
data_generator_train = DataGenenerator(train_imgs, augmentation=albumentations_train) data_generator_train_eval = DataGenenerator(train_imgs, shuffle=False) data_generator_val = DataGenenerator(val_imgs, shuffle=False )
Understanding Clouds from Satellite Images
5,920,350
best_score = rf_random.best_score_ print("best score {}".format(best_score))<choose_model_class>
class PrAucCallback(Callback): def __init__(self, data_generator, num_workers=num_cores, early_stopping_patience=5, plateau_patience=3, reduction_rate=0.5, stage='train', checkpoints_path='checkpoints/'): super(Callback, self ).__init__() self.data_generator = data_generator self.num_workers = num_workers self.class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] self.history = [[] for _ in range(len(self.class_names)+ 1)] self.early_stopping_patience = early_stopping_patience self.plateau_patience = plateau_patience self.reduction_rate = reduction_rate self.stage = stage self.best_pr_auc = -float('inf') if not os.path.exists(checkpoints_path): os.makedirs(checkpoints_path) self.checkpoints_path = checkpoints_path def compute_pr_auc(self, y_true, y_pred): pr_auc_mean = 0 for class_i in range(len(self.class_names)) : precision, recall, _ = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) pr_auc = auc(recall, precision) pr_auc_mean += pr_auc/len(self.class_names) print(f"PR AUC {self.class_names[class_i]}, {self.stage}: {pr_auc:.3f}") self.history[class_i].append(pr_auc) print(f"PR AUC mean, {self.stage}: {pr_auc_mean:.3f}") self.history[-1].append(pr_auc_mean) return pr_auc_mean def is_patience_lost(self, patience): if len(self.history[-1])> patience: best_performance = max(self.history[-1][-(patience + 1):-1]) return best_performance == self.history[-1][-(patience + 1)] and best_performance >= self.history[-1][-1] def early_stopping_check(self, pr_auc_mean): if self.is_patience_lost(self.early_stopping_patience): self.model.stop_training = True def model_checkpoint(self, pr_auc_mean, epoch): if pr_auc_mean > self.best_pr_auc: for checkpoint in glob.glob(os.path.join(self.checkpoints_path, 'classifier_densenet169_epoch_*')) : os.remove(checkpoint) self.best_pr_auc = pr_auc_mean self.model.save(os.path.join(self.checkpoints_path, f'classifier_densenet169_epoch_{epoch}_val_pr_auc_{pr_auc_mean}.h5')) print("Saved new checkpoint") def reduce_lr_on_plateau(self): if self.is_patience_lost(self.plateau_patience): new_lr = float(keras.backend.get_value(self.model.optimizer.lr)) * self.reduction_rate keras.backend.set_value(self.model.optimizer.lr, new_lr) print(f"Reduced learning rate to {new_lr}.") def on_epoch_end(self, epoch, logs={}): y_pred = self.model.predict_generator(self.data_generator, workers=self.num_workers) y_true = self.data_generator.get_labels() pr_auc_mean = self.compute_pr_auc(y_true, y_pred) if self.stage == 'val': self.early_stopping_check(pr_auc_mean) self.model_checkpoint(pr_auc_mean, epoch) self.reduce_lr_on_plateau() def get_pr_auc_history(self): return self.history
Understanding Clouds from Satellite Images
5,920,350
test_model = RandomForestClassifier( n_estimators=best_params['n_estimators'], criterion=best_params['criterion'], max_features=best_params['max_features'], max_depth=best_params['max_depth'], min_samples_split=best_params['min_samples_split'], min_samples_leaf=best_params['min_samples_leaf'], max_leaf_nodes=best_params['max_leaf_nodes'], bootstrap=best_params['bootstrap'] )<compute_train_metric>
def get_model() : K.clear_session() seresnext101, preprocess_input = Classifiers.get('seresnext101') base_model = seresnext101(weights='imagenet', include_top=False, input_shape=(224, 224, 3)) x = base_model.output x = keras.layers.GlobalAveragePooling2D()(x) x = Dense(512 )(x) x = BatchNormalization()(x) x = LeakyReLU()(x) y_pred = Dense(4, activation='sigmoid' )(x) return Model(inputs=base_model.input, outputs=y_pred )
Understanding Clouds from Satellite Images
5,920,350
scores = cross_val_score(test_model, x, y, cv=10, scoring='accuracy' )<compute_test_metric>
model = get_model()
Understanding Clouds from Satellite Images
5,920,350
print(scores) print("Mean Accuracy: {}".format(scores.mean()))<train_model>
for base_layer in model.layers[:-1]: base_layer.trainable = False lr=1e-3 adam = Adam(lr=1e-3) model.compile(optimizer=adam, loss='binary_crossentropy', metrics=["accuracy"]) history_0 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=1, workers=num_cores, use_multiprocessing=True, verbose=1 )
Understanding Clouds from Satellite Images
5,920,350
model = RandomForestClassifier( n_estimators=best_params['n_estimators'], criterion=best_params['criterion'], max_features=best_params['max_features'], max_depth=best_params['max_depth'], min_samples_split=best_params['min_samples_split'], min_samples_leaf=best_params['min_samples_leaf'], max_leaf_nodes=best_params['max_leaf_nodes'], bootstrap=best_params['bootstrap'] ) model.fit(x, y )<count_missing_values>
for base_layer in model.layers[:-1]: base_layer.trainable = True lr=lr/3.0 K.set_value(adam.lr, lr) model.compile(optimizer=adam, loss='binary_crossentropy', metrics=["accuracy"]) print("training...") history_1 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=20, use_multiprocessing=True, workers=num_cores, verbose=1 ) model.save("model.h5" )
Understanding Clouds from Satellite Images
5,920,350
print(raw_test.isnull().sum()) print("-"*10) print(raw_test.isnull().sum() /raw_test.shape[0] )<feature_engineering>
lr=lr/3.0 K.set_value(adam.lr, lr) history_1 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=15, use_multiprocessing=True, workers=num_cores, verbose=1 ) model.save("model.h5" )
Understanding Clouds from Satellite Images
5,920,350
raw_test['Fare'].fillna(raw_test['Fare'].mean() , inplace=True) raw_test['NoCabin'] = np.where(raw_test['Cabin'].isnull() , 1, 0) raw_test['IsChildren'] = np.where(raw_test['Age']<=10, 1, 0) raw_test['IsAgeNull'] = np.where(np.isnan(raw_test['Age']), 1, 0) raw_test['Age'].fillna(raw_test['Age'].mean() , inplace=True) raw_test['Age'] = raw_test['Age'].round().astype(int) raw_test['AgeLabel'] = pd.cut(raw_test['Age'], bins=np.arange(start=0, stop=90, step=10), labels=np.arange(start=0, stop=8, step=1), include_lowest=True) raw_test['FamilySize'] = raw_test.apply(lambda row: row['SibSp']+row['Parch'], axis=1) raw_test['LowFare'] = np.where(raw_test['Fare']<=50, 1, 0) raw_test['HighFare'] = np.where(raw_test['Fare']>300, 1, 0) raw_test['MediumFare'] = raw_test.apply(medium_fare, axis=1) raw_test['FareLabel'] = pd.cut(raw_test.Fare, bins=np.arange(start=0, stop=600, step=50), precision=0, labels=np.arange(start=0, stop=11, step=1), include_lowest=True) raw_test['NoFamily'] = np.where(raw_test['FamilySize']==0, 1, 0) raw_test['SmallFamily'] = np.where(( raw_test['FamilySize']>0)&(raw_test['FamilySize']<4), 1, 0) raw_test['MediumFamily'] = np.where(( raw_test['FamilySize']>3)&(raw_test['FamilySize']<7), 1, 0) raw_test['LargeFamily'] = np.where(raw_test['FamilySize']>=7, 1, 0) raw_test['Title'] = raw_test.Name.str.extract('([A-Za-z]+)\.', expand=False) raw_test['Title'] = raw_test['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') raw_test['Title'] = raw_test['Title'].replace('Mlle', 'Miss') raw_test['Title'] = raw_test['Title'].replace('Ms', 'Miss') raw_test['Title'] = raw_test['Title'].replace('Mme', 'Mrs') raw_test = pd.concat([raw_test, pd.get_dummies(raw_test['Sex'])], axis=1) raw_test = pd.concat([raw_test, pd.get_dummies(raw_test['Title'], prefix='title')], axis=1) raw_test = pd.concat([raw_test, pd.get_dummies(raw_test['Pclass'], prefix='Pclass')], axis=1) data_test = raw_test[need_columns] <save_to_csv>
lr=lr/3.0 K.set_value(adam.lr, lr) history_1 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=10, use_multiprocessing=True, workers=num_cores, verbose=1 ) model.save("model.h5" )
Understanding Clouds from Satellite Images
5,920,350
ids = raw_test['PassengerId'] predictions = model.predict(data_test) output = pd.DataFrame({ 'PassengerId' : ids, 'Survived': predictions }) output.to_csv('submission.csv', index = False) output.head(10 )<load_from_csv>
lr=lr/3.0 K.set_value(adam.lr, lr) history_1 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=10, use_multiprocessing=True, workers=num_cores, verbose=1 ) model.save("model.h5" )
Understanding Clouds from Satellite Images
5,920,350
for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) train_data = pd.read_csv("/kaggle/input/titanic/train.csv") <feature_engineering>
class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] def get_threshold_for_recall(y_true, y_pred, class_i, recall_threshold=0.95, precision_threshold=0.94, plot=False): precision, recall, thresholds = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) i = len(thresholds)- 1 best_recall_threshold = None while best_recall_threshold is None: next_threshold = thresholds[i] next_recall = recall[i] if next_recall >= recall_threshold: best_recall_threshold = next_threshold i -= 1 best_precision_threshold = [thres for prec, thres in zip(precision, thresholds)if prec >= precision_threshold][0] if plot: plt.figure(figsize=(10, 7)) plt.step(recall, precision, color='r', alpha=0.3, where='post') plt.fill_between(recall, precision, alpha=0.3, color='r') plt.axhline(y=precision[i + 1]) recall_for_prec_thres = [rec for rec, thres in zip(recall, thresholds) if thres == best_precision_threshold][0] plt.axvline(x=recall_for_prec_thres, color='g') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.legend(['PR curve', f'Precision {precision[i + 1]:.2f} corresponding to selected recall threshold', f'Recall {recall_for_prec_thres:.2f} corresponding to selected precision threshold']) plt.title(f'Precision-Recall curve for Class {class_names[class_i]}') return best_recall_threshold, best_precision_threshold y_pred = model.predict_generator(data_generator_val, workers=num_cores, verbose=1) y_true = data_generator_val.get_labels() recall_thresholds = dict() precision_thresholds = dict() for i, class_name in tqdm(enumerate(class_names)) : recall_thresholds[class_name], precision_thresholds[class_name] = get_threshold_for_recall(y_true, y_pred, i, plot=True )
Understanding Clouds from Satellite Images
5,920,350
train_data["Name"] = train_data["Name"].str.split(',' ).str[1] train_data["Name"] = train_data["Name"].str.split('.' ).str[0] train_data["Name"] = train_data["Name"].str.strip()<groupby>
data_generator_test = DataGenenerator(folder_imgs=test_imgs_folder, shuffle=False) y_pred_test = model.predict_generator(data_generator_test, workers=num_cores, verbose=1 )
Understanding Clouds from Satellite Images
5,920,350
x = train_data.groupby('Name' ).agg(['count'] ).index.get_level_values('Name') x<categorify>
image_labels_empty = set() for i,(img, predictions)in enumerate(zip(os.listdir(test_imgs_folder), y_pred_test)) : for class_i, class_name in enumerate(class_names): if predictions[class_i] < recall_thresholds[class_name]: image_labels_empty.add(f'{img}_{class_name}' )
Understanding Clouds from Satellite Images
5,920,350
train_data["Age"] = train_data.groupby("Name" ).transform(lambda x: x.fillna(x.mean())) ['Age'] train_data['Sex'].replace({'female':0,'male':1},inplace=True) train_data.head()<define_variables>
submission = pd.read_csv('../input/efficient-net-b4-unet-clouds/submission.csv') submission.head()
Understanding Clouds from Satellite Images
5,920,350
train_data_log = train_data.iloc[:,[False,False,True, False,True,True,True,True,False,True,False,False]] normalized_data_train=(train_data_log-train_data_log.min())/(train_data_log.max() -train_data_log.min()) train_labels_log = train_data.iloc[:,1] normalized_data_train.head()<compute_test_metric>
predictions_nonempty = set(submission.loc[~submission['EncodedPixels'].isnull() , 'Image_Label'].values )
Understanding Clouds from Satellite Images
5,920,350
def log_odds(data,coefficients,intercept): return np.dot(data,coefficients)+ intercept l_o= log_odds(normalized_data_train,initial_coefs, initial_intercept) <compute_test_metric>
print(f'{len(image_labels_empty.intersection(predictions_nonempty)) } masks would be removed' )
Understanding Clouds from Satellite Images
5,920,350
def sigmoid(log_odds_vars): sigmoid_values = 1/(1+np.exp(-log_odds_vars)) return sigmoid_values sigmoid_vals = sigmoid(l_o) <compute_test_metric>
submission.loc[submission['Image_Label'].isin(image_labels_empty), 'EncodedPixels'] = np.nan submission.to_csv('submission_segmentation_and_classifier.csv', index=None )
Understanding Clouds from Satellite Images
5,920,350
<compute_train_metric><EOS>
FileLink(r'submission_segmentation_and_classifier.csv' )
Understanding Clouds from Satellite Images
6,096,333
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<compute_train_metric>
!pip install tta-wrapper --quiet seed = 0 seed_everything(seed) warnings.filterwarnings("ignore" )
Understanding Clouds from Satellite Images
6,096,333
best_coef = best_coefs[0] best_int = best_coefs[1] v = sigmoid(log_odds(normalized_data_train,best_coef,best_int)) <define_variables>
train = pd.read_csv('../input/understanding_cloud_organization/train.csv') submission = pd.read_csv('../input/understanding_cloud_organization/sample_submission.csv') train['image'] = train['Image_Label'].apply(lambda x: x.split('_')[0]) train['label'] = train['Image_Label'].apply(lambda x: x.split('_')[1]) submission['image'] = submission['Image_Label'].apply(lambda x: x.split('_')[0]) test = pd.DataFrame(submission['image'].unique() , columns=['image']) train_df = pd.pivot_table(train, index=['image'], values=['EncodedPixels'], columns=['label'], aggfunc=np.min ).reset_index() train_df.columns = ['image', 'Fish_mask', 'Flower_mask', 'Gravel_mask', 'Sugar_mask'] print('Compete set samples:', len(train_df)) print('Test samples:', len(submission)) display(train.head() )
Understanding Clouds from Satellite Images
6,096,333
def find_threshold(sigmoid_vals): predictions = [] vals = [] accuracies = [] for num in range(1000): vals.append(num/1000) accuracy = 0 for i in v: if i > num/1000: predictions.append(1) else: predictions.append(0) for j in range(len(predictions)) : if predictions[j] == train_labels_log[j]: accuracy += 1 accuracies.append(accuracy/len(predictions)) accuracy = 0 predictions = [] indx = accuracies.index(max(accuracies)) print("Best accuracy on training set:") print(max(accuracies)) best_threshold = vals[indx] return best_threshold best_thresh = find_threshold(v) print(best_thresh )<categorify>
X_train, X_val = train_test_split(train_df, test_size=0.2, random_state=seed) X_train['set'] = 'train' X_val['set'] = 'validation' test['set'] = 'test' print('Train samples: ', len(X_train)) print('Validation samples: ', len(X_val))
Understanding Clouds from Satellite Images
6,096,333
def calculate_precision(sigmoid_vals, threshold, labels): "Precision is True Positives /(True Positives + False Positives)" predictions = [] true_positives = 0 false_positives = 0 for i in sigmoid_vals: if i > threshold: predictions.append(1) else: predictions.append(0) for i in range(len(labels)) : if labels[i] == 1 and labels[i] == predictions[i]: true_positives += 1 elif labels[i] == 0 and labels[i] != predictions[i]: false_positives += 1 return true_positives/(true_positives + false_positives) print("Precision:") print(calculate_precision(v, best_thresh, train_labels_log)) <define_variables>
BACKBONE = 'resnet18' BATCH_SIZE = 32 EPOCHS = 12 LEARNING_RATE = 3e-4 HEIGHT = 384 WIDTH = 480 CHANNELS = 3 N_CLASSES = 4 ES_PATIENCE = 5 RLROP_PATIENCE = 3 DECAY_DROP = 0.5 model_path = 'uNet_%s_%sx%s.h5' %(BACKBONE, HEIGHT, WIDTH )
Understanding Clouds from Satellite Images
6,096,333
def calculate_recall(sigmoid_vals, threshold, labels): "Recall is True Positives /(True Positives + False Negatives)" predictions = [] true_positives = 0 false_negatives = 0 for i in sigmoid_vals: if i > threshold: predictions.append(1) else: predictions.append(0) for i in range(len(labels)) : if labels[i] == 1 and labels[i] == predictions[i]: true_positives += 1 elif labels[i] == 1 and labels[i] != predictions[i]: false_negatives += 1 return true_positives/(true_positives + false_negatives) print("Recall") print(calculate_recall(v, best_thresh, train_labels_log)) <feature_engineering>
preprocessing = sm.get_preprocessing(BACKBONE) augmentation = albu.Compose([albu.HorizontalFlip(p=0.5), albu.VerticalFlip(p=0.5), albu.ShiftScaleRotate(rotate_limit=30, shift_limit=0.1, p=0.5) ] )
Understanding Clouds from Satellite Images
6,096,333
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data["Name"] = test_data["Name"].str.split(',' ).str[1] test_data["Name"] = test_data["Name"].str.split('.' ).str[0] test_data["Name"] = test_data["Name"].str.strip() test_data['Sex'].replace({'female':0,'male':1},inplace=True) x = test_data.groupby('Name' ).agg(['count'] ).index.get_level_values('Name') test_data["Age"] = test_data.groupby("Name" ).transform(lambda x: x.fillna(x.mean())) ['Age'] test_data_log = test_data.iloc[:,[False,True,False,True,True,True,True,False,True,False,False]] normalized_data_test=(test_data_log-test_data_log.min())/(test_data_log.max() -test_data_log.min()) <compute_test_metric>
train_generator = DataGenerator( directory=train_images_dest_path, dataframe=X_train, target_df=train, batch_size=BATCH_SIZE, target_size=(HEIGHT, WIDTH), n_channels=CHANNELS, n_classes=N_CLASSES, preprocessing=preprocessing, augmentation=augmentation, seed=seed) valid_generator = DataGenerator( directory=validation_images_dest_path, dataframe=X_val, target_df=train, batch_size=BATCH_SIZE, target_size=(HEIGHT, WIDTH), n_channels=CHANNELS, n_classes=N_CLASSES, preprocessing=preprocessing, seed=seed )
Understanding Clouds from Satellite Images
6,096,333
pred_test = sigmoid(log_odds(normalized_data_test,best_coef,best_int))<define_variables>
model = sm.Unet(backbone_name=BACKBONE, encoder_weights='imagenet', classes=N_CLASSES, activation='sigmoid', input_shape=(HEIGHT, WIDTH, CHANNELS)) checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True) es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1) rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1) metric_list = [dice_coef, sm.metrics.iou_score] callback_list = [checkpoint, es, rlrop] optimizer = RAdam(learning_rate=LEARNING_RATE, warmup_proportion=0.1) model.compile(optimizer=optimizer, loss=sm.losses.bce_dice_loss, metrics=metric_list) model.summary()
Understanding Clouds from Satellite Images
6,096,333
classifier = [] for i in range(len(pred_test)) : if pred_test[i] > best_thresh: classifier.append(1) else: classifier.append(0 )<save_to_csv>
STEP_SIZE_TRAIN = len(X_train)//BATCH_SIZE STEP_SIZE_VALID = len(X_val)//BATCH_SIZE history = model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, callbacks=callback_list, epochs=EPOCHS, verbose=2 ).history
Understanding Clouds from Satellite Images
6,096,333
data = {'PassengerId': test_data["PassengerId"].values, 'Survived':classifier} df_submission = pd.DataFrame(data) df_submission.to_csv("submission_log_regression2.csv",index=False) <set_options>
model = load_model('../input/cloud-seg-resnet18-trainedlonger/resnet18_trained_longer.h5', custom_objects={'RAdam':RAdam, 'binary_crossentropy_plus_dice_loss':sm.losses.bce_dice_loss, 'dice_coef':dice_coef, 'iou_score':sm.metrics.iou_score, 'f1-score':sm.metrics.f1_score} )
Understanding Clouds from Satellite Images
6,096,333
assert sys.version_info >=(3, 5) assert sklearn.__version__ >= "0.20" %matplotlib inline warnings.filterwarnings(action="ignore" )<load_from_csv>
class_names = ['Fish ', 'Flower', 'Gravel', 'Sugar '] best_tresholds = [.5,.5,.5,.35] best_masks = [25000, 20000, 22500, 15000] for index, name in enumerate(class_names): print('%s threshold=%.2f mask size=%d' %(name, best_tresholds[index], best_masks[index]))
Understanding Clouds from Satellite Images
6,096,333
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )<create_dataframe>
train_metrics = get_metrics(model, train, X_train, train_images_dest_path, best_tresholds, best_masks, seed=seed, preprocessing=preprocessing, set_name='Train') display(train_metrics) validation_metrics = get_metrics(model, train, X_val, validation_images_dest_path, best_tresholds, best_masks, seed=seed, preprocessing=preprocessing, set_name='Validation') display(validation_metrics )
Understanding Clouds from Satellite Images
6,096,333
passengerID = test['PassengerId'].copy()<concatenate>
model = tta_segmentation(model, h_flip=True, v_flip=True, h_shift=(-10, 10), v_shift=(-10, 10), merge='mean' )
Understanding Clouds from Satellite Images
6,096,333
train["split"] = "train" test["split"] = "test" data = pd.concat([train , test], ignore_index=True) data.set_index("PassengerId") data["Survived"].fillna(0.0,inplace=True) print(train.shape, test.shape, data.shape )<count_unique_values>
test_df = [] for i in range(0, test.shape[0], 300): batch_idx = list(range(i, min(test.shape[0], i + 300))) batch_set = test[batch_idx[0]: batch_idx[-1]+1] test_generator = DataGenerator( directory=test_images_dest_path, dataframe=batch_set, target_df=submission, batch_size=1, target_size=(HEIGHT, WIDTH), n_channels=CHANNELS, n_classes=N_CLASSES, preprocessing=preprocessing, seed=seed, mode='predict', shuffle=False) preds = model.predict_generator(test_generator) for index, b in enumerate(batch_idx): filename = test['image'].iloc[b] image_df = submission[submission['image'] == filename].copy() pred_masks = preds[index, ].round().astype(int) pred_rles = build_rles(pred_masks, reshape=(350, 525)) image_df['EncodedPixels'] = pred_rles pred_masks_post = preds[index, ].astype('float32') for class_index in range(N_CLASSES): pred_mask = pred_masks_post[...,class_index] pred_mask = post_process(pred_mask, threshold=best_tresholds[class_index], min_size=best_masks[class_index]) pred_masks_post[...,class_index] = pred_mask pred_rles_post = build_rles(pred_masks_post, reshape=(350, 525)) image_df['EncodedPixels_post'] = pred_rles_post test_df.append(image_df) sub_df = pd.concat(test_df )
Understanding Clouds from Satellite Images
6,096,333
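post_process and build_rles are called above but defined outside this excerpt. Judging from the call signature (a probability threshold plus a minimum mask size per class), a plausible sketch is thresholding the probability map and dropping small connected components, e.g. with OpenCV; this is an assumed reconstruction, not the notebook's actual helper:
import cv2
import numpy as np

def post_process(probability, threshold, min_size):
    # Binarize the probability map, then keep only connected components
    # that cover at least min_size pixels.
    mask = (probability > threshold).astype(np.uint8)
    num_components, components = cv2.connectedComponents(mask)
    cleaned = np.zeros(probability.shape, dtype=np.float32)
    for c in range(1, num_components):
        region = components == c
        if region.sum() >= min_size:
            cleaned[region] = 1.0
    return cleaned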
data.apply(lambda x: len(x.unique()))<feature_engineering>
images_to_inspect = np.random.choice(X_val['image'].unique() , 3, replace=False) inspect_set = train[train['image'].isin(images_to_inspect)].copy() inspect_set_temp = [] inspect_generator = DataGenerator( directory=validation_images_dest_path, dataframe=inspect_set, target_df=train, batch_size=1, target_size=(HEIGHT, WIDTH), n_channels=CHANNELS, n_classes=N_CLASSES, preprocessing=preprocessing, seed=seed, mode='fit', shuffle=False) preds = model.predict_generator(inspect_generator) for index, b in enumerate(range(len(preds))): filename = inspect_set['image'].iloc[b] image_df = inspect_set[inspect_set['image'] == filename].copy() pred_masks = preds[index, ].round().astype(int) pred_rles = build_rles(pred_masks, reshape=(350, 525)) image_df['EncodedPixels_pred'] = pred_rles pred_masks_post = preds[index, ].astype('float32') for class_index in range(N_CLASSES): pred_mask = pred_masks_post[...,class_index] pred_mask = post_process(pred_mask, threshold=best_tresholds[class_index], min_size=best_masks[class_index]) pred_masks_post[...,class_index] = pred_mask pred_rles_post = build_rles(pred_masks_post, reshape=(350, 525)) image_df['EncodedPixels_pred_post'] = pred_rles_post inspect_set_temp.append(image_df) inspect_set = pd.concat(inspect_set_temp) inspect_predictions(inspect_set, images_to_inspect, validation_images_dest_path, pred_col='EncodedPixels_pred' )
Understanding Clouds from Satellite Images
6,096,333
data['FamilySize'] = data['SibSp'] + data['Parch'] + 1 data['IsAlone'] = 0 data.loc[data['FamilySize'] == 1, 'IsAlone'] = 1<count_values>
inspect_predictions(inspect_set, images_to_inspect, validation_images_dest_path, pred_col='EncodedPixels_pred_post' )
Understanding Clouds from Satellite Images
6,096,333
data.groupby('Fare')['Fare'].unique().value_counts()<feature_engineering>
images_to_inspect_test = np.random.choice(sub_df['image'].unique() , 4, replace=False) inspect_predictions(sub_df, images_to_inspect_test, test_images_dest_path )
Understanding Clouds from Satellite Images
6,096,333
data['Fare'].fillna(data['Fare'].median() , inplace=True) data.loc[data['Fare'] <= 7.91, 'Fare'] = 0 data.loc[(data['Fare'] > 7.91)&(data['Fare'] <= 14.454), 'Fare'] = 1 data.loc[(data['Fare'] > 14.454)&(data['Fare'] <= 31), 'Fare'] = 2 data.loc[ data['Fare'] > 31, 'Fare'] = 3 data['Fare'] = data['Fare'].astype(int )<feature_engineering>
inspect_predictions(sub_df, images_to_inspect_test, test_images_dest_path, label_col='EncodedPixels_post' )
Understanding Clouds from Satellite Images
6,096,333
data['Has_Cabin'] = data["Cabin"].apply(lambda x: 0 if type(x)== float else 1 )<data_type_conversions>
submission_df = sub_df[['Image_Label' ,'EncodedPixels']] submission_df.to_csv('submission.csv', index=False) display(submission_df.head() )
Understanding Clouds from Satellite Images
6,096,333
avg = data['Age'].mean() std = data['Age'].std() null_count = data['Age'].isnull().sum() random_list = np.random.randint(avg - std, avg + std, size = null_count) data['Age'][np.isnan(data['Age'])] = random_list data['Age'] = data['Age'].astype(int )<feature_engineering>
submission_df_post = sub_df[['Image_Label' ,'EncodedPixels_post']] submission_df_post.columns = ['Image_Label' ,'EncodedPixels'] submission_df_post.to_csv('submission_post.csv', index=False) display(submission_df_post.head() )
Understanding Clouds from Satellite Images
6,096,333
<data_type_conversions><EOS>
if os.path.exists(train_images_dest_path): shutil.rmtree(train_images_dest_path) if os.path.exists(validation_images_dest_path): shutil.rmtree(validation_images_dest_path) if os.path.exists(test_images_dest_path): shutil.rmtree(test_images_dest_path )
Understanding Clouds from Satellite Images
5,862,707
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<data_type_conversions>
seed(10) set_random_seed(10) %matplotlib inline
Understanding Clouds from Satellite Images
5,862,707
data['Embarked'].fillna('S', inplace=True) data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int )<categorify>
!pip install keras-rectified-adam
Understanding Clouds from Satellite Images
5,862,707
data['Title'] = data['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0] data['Title'] = data['Title'].replace(['Miss', 'Mrs','Ms', 'Mlle', 'Lady', 'Mme', 'the Countess', 'Dona'], 'Miss/Mrs/Ms') data['Title'] = data['Title'].replace(['Dr', 'Col', 'Major', 'Jonkheer', 'Capt', 'Sir', 'Don'], 'Dr/Military/Noble') data['Title'] = data['Title'].fillna('Mr' ).map({'Mr': 0, 'Mrs': 1, 'Miss/Mrs/Ms': 2, 'Dr/Military/Noble': 3, 'Master': 4, 'Rev': 5} ).astype(int )<count_values>
test_imgs_folder = '../input/understanding_cloud_organization/test_images/' train_imgs_folder = '../input/understanding_cloud_organization/train_images/' num_cores = multiprocessing.cpu_count()
Understanding Clouds from Satellite Images
5,862,707
data['Title'].value_counts()<drop_column>
train_df = pd.read_csv('../input/understanding_cloud_organization/train.csv') train_df.head()
Understanding Clouds from Satellite Images
5,862,707
drop_columns = ["PassengerId", "Ticket","Cabin", "Name", "SibSp"] data = data.drop(drop_columns, axis=1 )<count_missing_values>
train_df = train_df[~train_df['EncodedPixels'].isnull() ] train_df['Image'] = train_df['Image_Label'].map(lambda x: x.split('_')[0]) train_df['Class'] = train_df['Image_Label'].map(lambda x: x.split('_')[1]) classes = train_df['Class'].unique() train_df = train_df.groupby('Image')['Class'].agg(set ).reset_index() for class_name in classes: train_df[class_name] = train_df['Class'].map(lambda x: 1 if class_name in x else 0) train_df.head()
Understanding Clouds from Satellite Images
5,862,707
data.isnull().sum()<drop_column>
img_2_ohe_vector = {img:vec for img, vec in zip(train_df['Image'], train_df.iloc[:, 2:].values)}
Understanding Clouds from Satellite Images
5,862,707
train = data[data['split'] == 'train'].copy() test = data[data['split'] == 'test'].copy() train_labels = train["Survived"].copy() train.drop(['split','Survived'], axis=1, inplace=True) test.drop(['split','Survived'], axis=1, inplace=True )<split>
train_imgs, val_imgs = train_test_split(train_df['Image'].values, test_size=0.2, stratify=train_df['Class'].map(lambda x: str(sorted(list(x)))) , random_state=2019 )
Understanding Clouds from Satellite Images
5,862,707
train, train_val, train_labels, train_val_labels = train_test_split(train, train_labels, test_size=0.2, random_state=42 )<choose_model_class>
class DataGenenerator(Sequence): def __init__(self, images_list=None, folder_imgs=train_imgs_folder, batch_size=32, shuffle=True, augmentation=None, resized_height=260, resized_width=260, num_channels=3): self.batch_size = batch_size self.shuffle = shuffle self.augmentation = augmentation if images_list is None: self.images_list = os.listdir(folder_imgs) else: self.images_list = deepcopy(images_list) self.folder_imgs = folder_imgs self.len = len(self.images_list)// self.batch_size self.resized_height = resized_height self.resized_width = resized_width self.num_channels = num_channels self.num_classes = 4 self.is_test = not 'train' in folder_imgs if not shuffle and not self.is_test: self.labels = [img_2_ohe_vector[img] for img in self.images_list[:self.len*self.batch_size]] def __len__(self): return self.len def on_epoch_start(self): if self.shuffle: random.shuffle(self.images_list) def __getitem__(self, idx): current_batch = self.images_list[idx * self.batch_size:(idx + 1)* self.batch_size] X = np.empty(( self.batch_size, self.resized_height, self.resized_width, self.num_channels)) y = np.empty(( self.batch_size, self.num_classes)) for i, image_name in enumerate(current_batch): path = os.path.join(self.folder_imgs, image_name) img = cv2.resize(cv2.imread(path),(self.resized_height, self.resized_width)).astype(np.float32) if not self.augmentation is None: augmented = self.augmentation(image=img) img = augmented['image'] X[i, :, :, :] = img/255.0 if not self.is_test: y[i, :] = img_2_ohe_vector[image_name] return X, y def get_labels(self): if self.shuffle: images_current = self.images_list[:self.len*self.batch_size] labels = [img_2_ohe_vector[img] for img in images_current] else: labels = self.labels return np.array(labels )
Understanding Clouds from Satellite Images
5,862,707
num_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="median")) , ("MinMaxScaler", MinMaxScaler()) ] )<normalization>
albumentations_train = Compose([ VerticalFlip() , HorizontalFlip() , Rotate(limit=20), GridDistortion() ], p=1 )
Understanding Clouds from Satellite Images
5,862,707
train_prepared = num_pipeline.fit_transform(train) train_val = num_pipeline.fit_transform(train_val )<train_on_grid>
data_generator_train = DataGenenerator(train_imgs, augmentation=albumentations_train) data_generator_train_eval = DataGenenerator(train_imgs, shuffle=False) data_generator_val = DataGenenerator(val_imgs, shuffle=False )
Understanding Clouds from Satellite Images
5,862,707
model_lgre = LogisticRegression(random_state=0) param_grid = {'C': [0.014,0.012], 'multi_class': ['multinomial'], 'penalty': ['l1'],'solver': ['saga'], 'tol': [0.1] } GridCV_LR = GridSearchCV(model_lgre, param_grid, verbose=1, cv=5) GridCV_LR.fit(train_prepared,train_labels) score_grid_LR = GridCV_LR.best_score_ model_lgre = GridCV_LR.best_estimator_ print(score_grid_LR) predict_score_lg_clf = GridCV_LR.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_lg_clf )<train_on_grid>
class PrAucCallback(Callback): def __init__(self, data_generator, num_workers=num_cores, early_stopping_patience=5, plateau_patience=3, reduction_rate=0.5, stage='train', checkpoints_path='checkpoints/'): super(Callback, self ).__init__() self.data_generator = data_generator self.num_workers = num_workers self.class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] self.history = [[] for _ in range(len(self.class_names)+ 1)] self.early_stopping_patience = early_stopping_patience self.plateau_patience = plateau_patience self.reduction_rate = reduction_rate self.stage = stage self.best_pr_auc = -float('inf') if not os.path.exists(checkpoints_path): os.makedirs(checkpoints_path) self.checkpoints_path = checkpoints_path def compute_pr_auc(self, y_true, y_pred): pr_auc_mean = 0 for class_i in range(len(self.class_names)) : precision, recall, _ = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) pr_auc = auc(recall, precision) pr_auc_mean += pr_auc/len(self.class_names) print(f"PR AUC {self.class_names[class_i]}, {self.stage}: {pr_auc:.3f}") self.history[class_i].append(pr_auc) print(f"PR AUC mean, {self.stage}: {pr_auc_mean:.3f}") self.history[-1].append(pr_auc_mean) return pr_auc_mean def is_patience_lost(self, patience): if len(self.history[-1])> patience: best_performance = max(self.history[-1][-(patience + 1):-1]) return best_performance == self.history[-1][-(patience + 1)] and best_performance >= self.history[-1][-1] def early_stopping_check(self, pr_auc_mean): if self.is_patience_lost(self.early_stopping_patience): self.model.stop_training = True def model_checkpoint(self, pr_auc_mean, epoch): if pr_auc_mean > self.best_pr_auc: for checkpoint in glob.glob(os.path.join(self.checkpoints_path, 'classifier_densenet169_epoch_*')) : os.remove(checkpoint) self.best_pr_auc = pr_auc_mean self.model.save(os.path.join(self.checkpoints_path, f'classifier_densenet169_epoch_{epoch}_val_pr_auc_{pr_auc_mean}.h5')) print("Saved new checkpoint") def reduce_lr_on_plateau(self): if self.is_patience_lost(self.plateau_patience): new_lr = float(keras.backend.get_value(self.model.optimizer.lr)) * self.reduction_rate keras.backend.set_value(self.model.optimizer.lr, new_lr) print(f"Reduced learning rate to {new_lr}.") def on_epoch_end(self, epoch, logs={}): y_pred = self.model.predict_generator(self.data_generator, workers=self.num_workers) y_true = self.data_generator.get_labels() pr_auc_mean = self.compute_pr_auc(y_true, y_pred) if self.stage == 'val': self.early_stopping_check(pr_auc_mean) self.model_checkpoint(pr_auc_mean, epoch) self.reduce_lr_on_plateau() def get_pr_auc_history(self): return self.history
Understanding Clouds from Satellite Images
5,862,707
rand_forest_clf = RandomForestClassifier(random_state=42) param_grid = {'max_depth': [15], 'min_samples_split': [5],'n_estimators' : [100] } GridCV_rd_clf = GridSearchCV(rand_forest_clf, param_grid, verbose=1, cv=5) GridCV_rd_clf.fit(train_prepared, train_labels) score_grid_rd = GridCV_rd_clf.best_score_ rand_forest_clf = GridCV_rd_clf.best_estimator_ print(score_grid_rd) predict_score_rd_clf = GridCV_rd_clf.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_rd_clf )<choose_model_class>
train_metric_callback = PrAucCallback(data_generator_train_eval) val_callback = PrAucCallback(data_generator_val, stage='val' )
Understanding Clouds from Satellite Images
5,862,707
tree_clf = DecisionTreeClassifier()<train_on_grid>
def dice_coef(y_true, y_pred, smooth=1): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return(2.* intersection + smooth)/(K.sum(y_true_f)+ K.sum(y_pred_f)+ smooth) def dice_loss(y_true, y_pred): smooth = 1. y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = y_true_f * y_pred_f score =(2.* K.sum(intersection)+ smooth)/(K.sum(y_true_f)+ K.sum(y_pred_f)+ smooth) return 1.- score def bce_dice_loss(y_true, y_pred): return binary_crossentropy(y_true, y_pred)+ dice_loss(y_true, y_pred )
Understanding Clouds from Satellite Images
5,862,707
params_grid_ada = {} ada_model = AdaBoostClassifier(tree_clf,n_estimators=3000, algorithm="SAMME.R", learning_rate=0.05, random_state=42) GridCV_ada = GridSearchCV(ada_model, params_grid_ada, verbose=1, cv=5) GridCV_ada.fit(train_prepared, train_labels) score_grid_ada = GridCV_ada.best_score_ model_ada = GridCV_ada.best_estimator_ print(score_grid_ada) predict_score_ada = GridCV_ada.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_ada )<choose_model_class>
!pip install -U git+https://github.com/qubvel/efficientnet
Understanding Clouds from Satellite Images
5,862,707
params_grid_gb = {} gbc_model = GradientBoostingClassifier(criterion='friedman_mse', init=None, learning_rate=0.05, loss='deviance', max_depth=3, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=10000, n_iter_no_change=None, presort='auto', random_state=None, subsample=1.0, tol=0.0001, validation_fraction=0.1, verbose=0, warm_start=False) GridCV_GB = GridSearchCV(gbc_model, params_grid_gb, verbose=1, cv=5) GridCV_GB.fit(train_prepared, train_labels) score_grid_GB = GridCV_GB.best_score_ model_gbc = GridCV_GB.best_estimator_ print(score_grid_GB) predict_score_GB = GridCV_GB.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_GB )<train_on_grid>
def get_model() : K.clear_session() base_model = efn.EfficientNetB2(weights='imagenet', include_top=False, pooling='avg', input_shape=(260, 260, 3)) x = base_model.output y_pred = Dense(4, activation='sigmoid' )(x) return Model(inputs=base_model.input, outputs=y_pred) model = get_model()
Understanding Clouds from Satellite Images
5,862,707
mlp_clf = MLPClassifier(activation = "logistic", hidden_layer_sizes=(300,), random_state=42,batch_size=1000) param_grid = { 'max_iter': [1200], 'alpha': [1e-4], 'solver': ['sgd'], 'learning_rate_init': [0.05,0.06],'tol': [1e-4] } GridCV_MLP = GridSearchCV(mlp_clf, param_grid, verbose=1, cv=3) GridCV_MLP.fit(train,train_labels) score_grid_MLP = GridCV_MLP.best_score_ model_mlp = GridCV_MLP.best_estimator_ print(score_grid_MLP) predict_score_MLP = GridCV_MLP.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_MLP )<choose_model_class>
from keras_radam import RAdam
Understanding Clouds from Satellite Images
5,862,707
params_grid_search_lgb = { } lgb_model = lgb.LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=0.7, gamma=0, importance_type='split', learning_rate=0.05, max_depth=3, min_child_samples=20, min_child_weight=6, min_split_gain=0.0, n_estimators=20000, n_jobs=-1, nthread=4, num_leaves=31, objective=None, random_state=None, reg_alpha=0.0, reg_lambda=0.0, scale_pos_weight=1, seed=29, silent=True, subsample=0.7, subsample_for_bin=200000, subsample_freq=0) GridCV_LGB = GridSearchCV(lgb_model, params_grid_search_lgb, verbose=1, cv=5) GridCV_LGB.fit(train_prepared,train_labels) score_grid_LGB = GridCV_LGB.best_score_ model_lgb = GridCV_LGB.best_estimator_ print(score_grid_LGB) predict_score_LGB = GridCV_LGB.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_LGB )<train_on_grid>
for base_layer in model.layers[:-3]: base_layer.trainable = False model.compile(optimizer=RAdam(warmup_proportion=0.1, min_lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy']) history_0 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=20, callbacks=[train_metric_callback, val_callback], workers=num_cores, verbose=1 )
Understanding Clouds from Satellite Images
5,862,707
params_grid_xgb = {} xgb_model = xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bytree=0.7, gamma=0.2, learning_rate=0.009, max_delta_step=0, max_depth=3, min_child_weight=6, missing=None, n_estimators=10000, n_jobs=1, nthread=4, objective='binary:logistic', random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=29, silent=True, subsample=0.7) GridCV_XGB = GridSearchCV(xgb_model, params_grid_xgb, verbose=1, cv=5) GridCV_XGB.fit(train_prepared, train_labels) score_grid_XGB = GridCV_XGB.best_score_ model_xgb = GridCV_XGB.best_estimator_ print(score_grid_XGB) predict_score_XGB = GridCV_XGB.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_XGB )<train_on_grid>
for base_layer in model.layers[:-3]: base_layer.trainable = True model.compile(optimizer=RAdam(warmup_proportion=0.1, min_lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy']) history_1 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=20, callbacks=[train_metric_callback, val_callback], workers=num_cores, verbose=1, initial_epoch=1 )
Understanding Clouds from Satellite Images
5,862,707
params_grid_vclf = {} estimators = [ ("Ada Boosting", model_ada), ("Gradient Boost", model_gbc), ("lgb_model", model_lgb), ("XGBoost", model_xgb), ("MLP", model_mlp), ] voting_clf = VotingClassifier(estimators,voting='soft') GridCV_voting_clf = GridSearchCV(voting_clf, params_grid_vclf, verbose=1, cv=5) GridCV_voting_clf.fit(train_prepared, train_labels) score_grid_vclf = GridCV_voting_clf.best_score_ model_vclf = GridCV_voting_clf.best_estimator_ print(score_grid_vclf) predict_score_vclf = GridCV_voting_clf.predict(train_val) report_and_confusion_matrix(train_val_labels, predict_score_vclf) <categorify>
Image(".. /input/clouds-classifier-files/loss_hist_densenet169.png" )
Understanding Clouds from Satellite Images
5,862,707
test_prepared = num_pipeline.fit_transform(test )<predict_on_test>
Image(".. /input/clouds-classifier-files/pr_auc_hist_densenet169.png" )
Understanding Clouds from Satellite Images
5,862,707
sub_voting_classifier = model_vclf.predict(test_prepared )<choose_model_class>
class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] def get_threshold_for_recall(y_true, y_pred, class_i, recall_threshold=0.94, precision_threshold=0.90, plot=False): precision, recall, thresholds = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) i = len(thresholds)- 1 best_recall_threshold = None while best_recall_threshold is None: next_threshold = thresholds[i] next_recall = recall[i] if next_recall >= recall_threshold: best_recall_threshold = next_threshold i -= 1 best_precision_threshold = [thres for prec, thres in zip(precision, thresholds)if prec >= precision_threshold][0] if plot: plt.figure(figsize=(10, 7)) plt.step(recall, precision, color='r', alpha=0.3, where='post') plt.fill_between(recall, precision, alpha=0.3, color='r') plt.axhline(y=precision[i + 1]) recall_for_prec_thres = [rec for rec, thres in zip(recall, thresholds) if thres == best_precision_threshold][0] plt.axvline(x=recall_for_prec_thres, color='g') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.legend(['PR curve', f'Precision {precision[i + 1]:.2f} corresponding to selected recall threshold', f'Recall {recall_for_prec_thres:.2f} corresponding to selected precision threshold']) plt.title(f'Precision-Recall curve for Class {class_names[class_i]}') return best_recall_threshold, best_precision_threshold y_pred = model.predict_generator(data_generator_val, workers=num_cores) y_true = data_generator_val.get_labels() recall_thresholds = dict() precision_thresholds = dict() for i, class_name in tqdm(enumerate(class_names)) : recall_thresholds[class_name], precision_thresholds[class_name] = get_threshold_for_recall(y_true, y_pred, i, plot=True )
Understanding Clouds from Satellite Images
5,862,707
estimators =[model_ada,model_lgb,model_gbc, model_mlp] sclf = StackingCVClassifier(classifiers= estimators, meta_classifier=model_xgb) print('3-fold cross validation: ') for clf, label in zip([model_ada, model_lgb, model_gbc, model_mlp, sclf], ['Ada Boosting', 'light gradient Boosting(LGBM)', 'Gradient Boosting', "MLP", 'StackingClassifier(Meta: XGboost)']): scores = model_selection.cross_val_score(clf, train_prepared, train_labels, cv=5, scoring='precision') print("precision: %0.2f(+/- %0.2f)[%s]" %(scores.mean() , scores.std() , label))<train_model>
data_generator_test = DataGenenerator(folder_imgs=test_imgs_folder, shuffle=False) y_pred_test = model.predict_generator(data_generator_test, workers=num_cores )
Understanding Clouds from Satellite Images
5,862,707
sclf.fit(train_prepared, train_labels )<predict_on_test>
image_labels_empty = set()
for img, predictions in zip(os.listdir(test_imgs_folder), y_pred_test):
    for class_i, class_name in enumerate(class_names):
        if predictions[class_i] < recall_thresholds[class_name]:
            image_labels_empty.add(f'{img}_{class_name}')
Understanding Clouds from Satellite Images
5,862,707
prediction_sclf = sclf.predict(train_val)
report_and_confusion_matrix(train_val_labels, prediction_sclf)<predict_on_test>
submission = pd.read_csv('../input/densenet201cloudy/densenet201.csv')
submission.head()
Understanding Clouds from Satellite Images
5,862,707
prediction_stacking_clf = sclf.predict(test_prepared )<save_to_csv>
predictions_nonempty = set(submission.loc[~submission['EncodedPixels'].isnull() , 'Image_Label'].values )
Understanding Clouds from Satellite Images
5,862,707
sub = pd.read_csv('../input/gender_submission.csv')
sub['Survived'] = sub_voting_classifier.astype('int64')
sub.to_csv("voting_classifier.csv", index=False)
sub1 = pd.read_csv('../input/gender_submission.csv')
sub1['Survived'] = prediction_stacking_clf.astype('int64')
sub1.to_csv("stacking_clf.csv", index=False)<import_modules>
print(f'{len(image_labels_empty.intersection(predictions_nonempty)) } masks would be removed' )
Understanding Clouds from Satellite Images
5,862,707
<choose_model_class><EOS>
submission.loc[submission['Image_Label'].isin(image_labels_empty), 'EncodedPixels'] = np.nan
submission.to_csv('submission_segmentation_and_classifier.csv', index=None)
Understanding Clouds from Satellite Images
5,815,864
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<categorify>
import os
import json

import albumentations as albu
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from skimage.exposure import adjust_gamma

import keras
from keras import backend as K
from keras import optimizers
from keras.legacy import interfaces
from keras.utils.generic_utils import get_custom_objects
from keras.models import Model
from keras.layers import (Input, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D,
                          concatenate, Concatenate, add, Add, Dense, Lambda, LeakyReLU,
                          Activation, SpatialDropout2D, Dropout, BatchNormalization,
                          GlobalAveragePooling2D, Reshape, multiply, Permute)
from keras.losses import binary_crossentropy
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.preprocessing.image import ImageDataGenerator
Understanding Clouds from Satellite Images
5,815,864
labels = to_categorical(train_labels )<train_model>
!pip install ../input/efficientnet-keras-source-code/repository/qubvel-efficientnet-c993591
Understanding Clouds from Satellite Images
5,815,864
model_history = model.fit(train, labels, epochs=200, batch_size=600, validation_split=0.2, verbose=2)<categorify>
def post_process(probability, threshold, min_size): mask = cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1] num_component, component = cv2.connectedComponents(mask.astype(np.uint8)) predictions = np.zeros(( 350, 525), np.float32) num = 0 for c in range(1, num_component): p =(component == c) if p.sum() > min_size: predictions[p] = 1 num += 1 return predictions, num def np_resize(img, input_shape): height, width = input_shape return cv2.resize(img,(width, height)) def mask2rle(img): pixels= img.T.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return ' '.join(str(x)for x in runs) def rle2mask(rle, input_shape): width, height = input_shape[:2] mask= np.zeros(width*height ).astype(np.uint8) array = np.asarray([int(x)for x in rle.split() ]) starts = array[0::2] lengths = array[1::2] current_position = 0 for index, start in enumerate(starts): mask[int(start):int(start+lengths[index])] = 1 current_position += lengths[index] return mask.reshape(height, width ).T def build_masks(rles, input_shape, reshape=None): depth = len(rles) if reshape is None: masks = np.zeros(( *input_shape, depth)) else: masks = np.zeros(( *reshape, depth)) for i, rle in enumerate(rles): if type(rle)is str: if reshape is None: masks[:, :, i] = rle2mask(rle, input_shape) else: mask = rle2mask(rle, input_shape) reshaped_mask = np_resize(mask, reshape) masks[:, :, i] = reshaped_mask return masks def build_rles(masks, reshape=None): width, height, depth = masks.shape rles = [] for i in range(depth): mask = masks[:, :, i] if reshape: mask = mask.astype(np.float32) mask = np_resize(mask, reshape ).astype(np.int64) rle = mask2rle(mask) rles.append(rle) return rles
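A quick check of the mask2rle helper above on a tiny made-up mask: the transpose before flattening means pixels are numbered column by column (top to bottom) and run starts are 1-based, matching the competition's RLE convention. As written, rle2mask appears to use the 1-based start directly as an array index, so a decoded run lands one pixel later than the encoded one; an exact round-trip would need the start shifted down by one.

import numpy as np

toy = np.zeros((4, 3), dtype=np.uint8)   # height 4, width 3 (made-up shape)
toy[1:3, 1] = 1                          # two pixels down the middle column
print(mask2rle(toy))                     # '6 2': start at pixel 6 (1-based, column-major), run length 2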
Understanding Clouds from Satellite Images
5,815,864
# transform only: reuse the preprocessing already fitted on the training set
test_prepared = num_pipeline.transform(test)<save_to_csv>
train_df = pd.read_csv('../input/understanding_cloud_organization/train.csv')
train_df['ImageId'] = train_df['Image_Label'].apply(lambda x: x.split('_')[0])
train_df['ClassId'] = train_df['Image_Label'].apply(lambda x: x.split('_')[1])
train_df['hasMask'] = ~ train_df['EncodedPixels'].isna()
print(train_df.shape)
train_df.head()
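For clarity, each Image_Label string combines the file name and the cloud class, so the splits above recover both pieces (the id below is made up):

example = '0011165.jpg_Fish'
print(example.split('_')[0], example.split('_')[1])   # 0011165.jpg Fish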
Understanding Clouds from Satellite Images
5,815,864
submission_keras = model.predict_classes(test_prepared)
sub['Survived'] = submission_keras.astype('int64')
sub.to_csv("keras.csv", index=False)<import_modules>
mask_count_df = train_df.groupby('ImageId').agg(np.sum).reset_index()
mask_count_df.sort_values('hasMask', ascending=False, inplace=True)
print(mask_count_df.shape)
mask_count_df.head()
Understanding Clouds from Satellite Images
5,815,864
import numpy as np
import pandas as pd<load_from_csv>
sub_df = pd.read_csv('../input/understanding_cloud_organization/sample_submission.csv')
sub_df['ImageId'] = sub_df['Image_Label'].apply(lambda x: x.split('_')[0])
test_imgs = pd.DataFrame(sub_df['ImageId'].unique(), columns=['ImageId'])
Understanding Clouds from Satellite Images
5,815,864
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )<load_from_csv>
class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, list_IDs, df, target_df=None, mode='fit', base_path='.. /input/understanding_cloud_organization/train_images', batch_size=32, dim=(1400, 2100), n_channels=3, reshape=None, gamma=None, augment=False, n_classes=4, random_state=2019, shuffle=True): self.dim = dim self.batch_size = batch_size self.df = df self.mode = mode self.base_path = base_path self.target_df = target_df self.list_IDs = list_IDs self.reshape = reshape self.gamma = gamma self.n_channels = n_channels self.augment = augment self.n_classes = n_classes self.shuffle = shuffle self.random_state = random_state self.on_epoch_end() np.random.seed(self.random_state) def __len__(self): 'Denotes the number of batches per epoch' return int(np.floor(len(self.list_IDs)/ self.batch_size)) def __getitem__(self, index): 'Generate one batch of data' indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] list_IDs_batch = [self.list_IDs[k] for k in indexes] X = self.__generate_X(list_IDs_batch) if self.mode == 'fit': y = self.__generate_y(list_IDs_batch) if self.augment: X, y = self.__augment_batch(X, y) return X, y elif self.mode == 'predict': return X else: raise AttributeError('The mode parameter should be set to "fit" or "predict".') def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.list_IDs)) if self.shuffle == True: np.random.seed(self.random_state) np.random.shuffle(self.indexes) def __generate_X(self, list_IDs_batch): 'Generates data containing batch_size samples' if self.reshape is None: X = np.empty(( self.batch_size, *self.dim, self.n_channels)) else: X = np.empty(( self.batch_size, *self.reshape, self.n_channels)) for i, ID in enumerate(list_IDs_batch): im_name = self.df['ImageId'].loc[ID] img_path = f"{self.base_path}/{im_name}" img = self.__load_rgb(img_path) if self.reshape is not None: img = np_resize(img, self.reshape) if self.gamma is not None: img = adjust_gamma(img, gamma=self.gamma) X[i,] = img return X def __generate_y(self, list_IDs_batch): if self.reshape is None: y = np.empty(( self.batch_size, *self.dim, self.n_classes), dtype=int) else: y = np.empty(( self.batch_size, *self.reshape, self.n_classes), dtype=int) for i, ID in enumerate(list_IDs_batch): im_name = self.df['ImageId'].iloc[ID] image_df = self.target_df[self.target_df['ImageId'] == im_name] rles = image_df['EncodedPixels'].values if self.reshape is not None: masks = build_masks(rles, input_shape=self.dim, reshape=self.reshape) else: masks = build_masks(rles, input_shape=self.dim) y[i, ] = masks return y def __load_grayscale(self, img_path): img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) img = img.astype(np.float32)/ 255. img = np.expand_dims(img, axis=-1) return img def __load_rgb(self, img_path): img = cv2.imread(img_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img.astype(np.float32)/ 255. return img def __random_transform(self, img, masks): composition = albu.Compose([ albu.HorizontalFlip() , ]) composed = composition(image=img, mask=masks) aug_img = composed['image'] aug_masks = composed['mask'] return aug_img, aug_masks def __augment_batch(self, img_batch, masks_batch): for i in range(img_batch.shape[0]): img_batch[i, ], masks_batch[i, ] = self.__random_transform( img_batch[i, ], masks_batch[i, ]) return img_batch, masks_batch
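One detail of the generator above worth spelling out: __len__ floors the image count divided by batch_size, so any partial final batch is dropped from each pass over the data, which is likely why the test-time loop later on uses batch_size=1. A toy illustration (numbers made up):

import math

n_images, batch_size = 100, 32
print(math.floor(n_images / batch_size))   # 3 batches per epoch -> only 96 of the 100 images are seen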
Understanding Clouds from Satellite Images
5,815,864
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )<data_type_conversions>
def H(lst, name, use_gn=False):
    if use_gn:
        norm = GroupNormalization(groups=1, name=name + '_gn')
    else:
        norm = BatchNormalization(name=name + '_bn')
    x = concatenate(lst)
    num_filters = int(x.shape.as_list()[-1] / 2)
    x = Conv2D(num_filters, (2, 2), padding='same', name=name)(x)
    x = norm(x)
    x = LeakyReLU(alpha=0.1, name=name + '_activation')(x)
    return x

def U(x, use_gn=False):
    if use_gn:
        norm = GroupNormalization(groups=1)
    else:
        norm = BatchNormalization()
    num_filters = int(x.shape.as_list()[-1] / 2)
    x = Conv2DTranspose(num_filters, (3, 3), strides=(2, 2), padding='same')(x)
    x = norm(x)
    x = LeakyReLU(alpha=0.1)(x)
    return x
Understanding Clouds from Satellite Images
5,815,864
print(train.dtypes[:5])
print(test.dtypes[:5])<feature_engineering>
# `efn` comes from the efficientnet package installed above; depending on the
# installed version the import may instead need to be `import efficientnet as efn`.
import efficientnet.keras as efn

def EfficientUNet(input_shape):
    backbone = efn.EfficientNetB4(
        weights=None,
        include_top=False,
        input_shape=input_shape
    )
    input = backbone.input
    x00 = backbone.input
    x10 = backbone.get_layer('stem_activation').output
    x20 = backbone.get_layer('block2d_add').output
    x30 = backbone.get_layer('block3d_add').output
    x40 = backbone.get_layer('block5f_add').output
    x50 = backbone.get_layer('block7b_add').output

    # nested (UNet++-style) decoder built from the H/U blocks defined above
    x01 = H([x00, U(x10)], 'X01')
    x11 = H([x10, U(x20)], 'X11')
    x21 = H([x20, U(x30)], 'X21')
    x31 = H([x30, U(x40)], 'X31')
    x41 = H([x40, U(x50)], 'X41')
    x02 = H([x00, x01, U(x11)], 'X02')
    x12 = H([x11, U(x21)], 'X12')
    x22 = H([x21, U(x31)], 'X22')
    x32 = H([x31, U(x41)], 'X32')
    x03 = H([x00, x01, x02, U(x12)], 'X03')
    x13 = H([x12, U(x22)], 'X13')
    x23 = H([x22, U(x32)], 'X23')
    x04 = H([x00, x01, x02, x03, U(x13)], 'X04')
    x14 = H([x13, U(x23)], 'X14')
    x05 = H([x00, x01, x02, x03, x04, U(x14)], 'X05')

    x_out = Concatenate(name='bridge')([x01, x02, x03, x04, x05])
    x_out = Conv2D(4, (3, 3), padding="same", name='final_output', activation="sigmoid")(x_out)
    return Model(inputs=input, outputs=x_out)

model = EfficientUNet((320, 480, 3))
model.summary()
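An optional sanity check (not in the original kernel) that the nested decoder brings the feature maps back to the input resolution, with one sigmoid channel per cloud class:

import numpy as np

dummy = np.zeros((1, 320, 480, 3), dtype=np.float32)
print(model.predict(dummy).shape)   # expected: (1, 320, 480, 4)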
Understanding Clouds from Satellite Images
5,815,864
xtrain = xtrain / 255.0
test = test / 255.0<categorify>
model.load_weights(".. /input/cloudmodels/EfficientNetB4.h5" )
Understanding Clouds from Satellite Images
5,815,864
print(type(ytrain))
nclasses = ytrain.max() - ytrain.min() + 1
print("Shape of ytrain before: ", ytrain.shape)
ytrain = to_categorical(ytrain, num_classes=nclasses)
print("Shape of ytrain after: ", ytrain.shape)
print(type(ytrain))<split>
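A tiny illustration (values made up) of what to_categorical does to the label vector: integer class ids become one-hot rows, so shape (n,) turns into (n, nclasses).

import numpy as np
from keras.utils import to_categorical

print(to_categorical(np.array([0, 2, 1]), num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]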
minsizes = [20000, 20000, 22500, 10000]
Understanding Clouds from Satellite Images
5,815,864
seed = 2
np.random.seed(seed)
split_pct = 0.1
xtrain, xval, ytrain, yval = train_test_split(xtrain, ytrain,
                                              test_size=split_pct,
                                              random_state=seed,
                                              shuffle=True,
                                              stratify=ytrain)
print(xtrain.shape, ytrain.shape, xval.shape, yval.shape)<import_modules>
sigmoid = lambda x: 1 /(1 + np.exp(-x))
Understanding Clouds from Satellite Images
5,815,864
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout, Lambda, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPool2D, AvgPool2D
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau<choose_model_class>
test_df = [] subsize = 500 for i in range(0, test_imgs.shape[0], subsize): batch_idx = list( range(i, min(test_imgs.shape[0], i + subsize)) ) test_generator = DataGenerator( batch_idx, df=test_imgs, shuffle=False, mode='predict', dim=(350, 525), reshape=(320, 480), gamma=0.8, n_channels=3, base_path='.. /input/understanding_cloud_organization/test_images', target_df=sub_df, batch_size=1, n_classes=4 ) batch_pred_masks = model.predict_generator( test_generator, workers=1, verbose=1 ) for j, b in enumerate(batch_idx): filename = test_imgs['ImageId'].iloc[b] image_df = sub_df[sub_df['ImageId'] == filename].copy() pred_masks = batch_pred_masks[j, ] pred_masks = cv2.resize(pred_masks, dsize=(525, 350), interpolation=cv2.INTER_LINEAR) arrt = np.array([]) for t in range(4): a, num_predict = post_process(sigmoid(pred_masks[:, :, t]), 0.6, minsizes[t]) if(arrt.shape ==(0,)) : arrt = a.reshape(350, 525, 1) else: arrt = np.append(arrt, a.reshape(350, 525, 1), axis = 2) pred_rles = build_rles(arrt, reshape=(350, 525)) image_df['EncodedPixels'] = pred_rles test_df.append(image_df) sub_df = pd.concat(test_df )
Understanding Clouds from Satellite Images
5,815,864
<choose_model_class><EOS>
sub_df = sub_df[['Image_Label', 'EncodedPixels']]
sub_df.to_csv('submission.csv', index=False)
display(sub_df.head(10))
Understanding Clouds from Satellite Images
5,644,971
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<choose_model_class>
!pip uninstall keras -y
!pip install keras==2.2.5
!pip install segmentation-models --quiet
!pip install tta-wrapper --quiet
Understanding Clouds from Satellite Images
5,644,971
lr_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )<init_hyperparams>
import os
import json

import albumentations as albu
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from skimage.exposure import adjust_gamma

import keras
from keras import backend as K
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.losses import binary_crossentropy
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
# needed by the AdamAccumulate optimizer defined below
from keras.optimizers import Optimizer
from keras.legacy import interfaces

import segmentation_models as sm
Understanding Clouds from Satellite Images
5,644,971
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=30,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False)

datagen.fit(xtrain)<define_variables>
class AdamAccumulate(Optimizer): def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0., amsgrad=False, accum_iters=1, **kwargs): if accum_iters < 1: raise ValueError('accum_iters must be >= 1') super(AdamAccumulate, self ).__init__(**kwargs) with K.name_scope(self.__class__.__name__): self.iterations = K.variable(0, dtype='int64', name='iterations') self.lr = K.variable(lr, name='lr') self.beta_1 = K.variable(beta_1, name='beta_1') self.beta_2 = K.variable(beta_2, name='beta_2') self.decay = K.variable(decay, name='decay') if epsilon is None: epsilon = K.epsilon() self.epsilon = epsilon self.initial_decay = decay self.amsgrad = amsgrad self.accum_iters = K.variable(accum_iters, K.dtype(self.iterations)) self.accum_iters_float = K.cast(self.accum_iters, K.floatx()) @interfaces.legacy_get_updates_support def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [K.update_add(self.iterations, 1)] lr = self.lr completed_updates = K.cast(K.tf.floordiv(self.iterations, self.accum_iters), K.floatx()) if self.initial_decay > 0: lr = lr *(1./(1.+ self.decay * completed_updates)) t = completed_updates + 1 lr_t = lr *(K.sqrt(1.- K.pow(self.beta_2, t)) /(1.- K.pow(self.beta_1, t))) update_switch = K.equal(( self.iterations + 1)% self.accum_iters, 0) update_switch = K.cast(update_switch, K.floatx()) ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params] vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params] gs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params] if self.amsgrad: vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params] else: vhats = [K.zeros(1)for _ in params] self.weights = [self.iterations] + ms + vs + vhats for p, g, m, v, vhat, tg in zip(params, grads, ms, vs, vhats, gs): sum_grad = tg + g avg_grad = sum_grad / self.accum_iters_float m_t =(self.beta_1 * m)+(1.- self.beta_1)* avg_grad v_t =(self.beta_2 * v)+(1.- self.beta_2)* K.square(avg_grad) if self.amsgrad: vhat_t = K.maximum(vhat, v_t) p_t = p - lr_t * m_t /(K.sqrt(vhat_t)+ self.epsilon) self.updates.append(K.update(vhat,(1 - update_switch)* vhat + update_switch * vhat_t)) else: p_t = p - lr_t * m_t /(K.sqrt(v_t)+ self.epsilon) self.updates.append(K.update(m,(1 - update_switch)* m + update_switch * m_t)) self.updates.append(K.update(v,(1 - update_switch)* v + update_switch * v_t)) self.updates.append(K.update(tg,(1 - update_switch)* sum_grad)) new_p = p_t if getattr(p, 'constraint', None)is not None: new_p = p.constraint(new_p) self.updates.append(K.update(p,(1 - update_switch)* p + update_switch * new_p)) return self.updates def get_config(self): config = {'lr': float(K.get_value(self.lr)) , 'beta_1': float(K.get_value(self.beta_1)) , 'beta_2': float(K.get_value(self.beta_2)) , 'decay': float(K.get_value(self.decay)) , 'epsilon': self.epsilon, 'amsgrad': self.amsgrad} base_config = super(AdamAccumulate, self ).get_config() return dict(list(base_config.items())+ list(config.items()))
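A hypothetical usage sketch (toy model and data, not from the original kernel): with accum_iters=8 the optimizer only applies a weight update every 8 batches, so a per-step batch of 4 behaves roughly like an effective batch size of 32 while keeping GPU memory low.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

opt = AdamAccumulate(lr=0.001, accum_iters=8)
toy_model = Sequential([Dense(1, activation='sigmoid', input_shape=(10,))])
toy_model.compile(optimizer=opt, loss='binary_crossentropy')
toy_model.fit(np.random.rand(64, 10), np.random.randint(0, 2, size=(64, 1)),
              batch_size=4, epochs=1, verbose=0)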
Understanding Clouds from Satellite Images
5,644,971
epochs = 15
batch_size = 64<train_model>
def post_process(probability, threshold, min_size):
    # binarise the probability map, then keep only connected components
    # larger than min_size pixels
    mask = cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1]
    num_component, component = cv2.connectedComponents(mask.astype(np.uint8))
    predictions = np.zeros((350, 525), np.float32)
    num = 0
    for c in range(1, num_component):
        p = (component == c)
        if p.sum() > min_size:
            predictions[p] = 1
            num += 1
    return predictions, num
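A toy example (values made up) of the post-processing above: a 4-pixel blob survives a small min_size but is removed by a larger one.

import numpy as np

prob = np.zeros((350, 525), dtype=np.float32)
prob[100:102, 200:202] = 0.9                      # one 4-pixel blob
kept, n = post_process(prob, 0.5, 3)
print(n, kept.sum())                              # 1 component kept, 4.0 pixels
kept, n = post_process(prob, 0.5, 10)
print(n, kept.sum())                              # 0 components survive the size filter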
Understanding Clouds from Satellite Images
5,644,971
history = model.fit_generator(datagen.flow(xtrain, ytrain, batch_size=batch_size),
                              epochs=epochs,
                              validation_data=(xval, yval),
                              verbose=1,
                              steps_per_epoch=xtrain.shape[0] // batch_size,
                              callbacks=[lr_reduction])<save_to_csv>
train_df = pd.read_csv('../input/understanding_cloud_organization/train.csv')
train_df['ImageId'] = train_df['Image_Label'].apply(lambda x: x.split('_')[0])
train_df['ClassId'] = train_df['Image_Label'].apply(lambda x: x.split('_')[1])
train_df['hasMask'] = ~ train_df['EncodedPixels'].isna()
print(train_df.shape)
train_df.head()
Understanding Clouds from Satellite Images
5,644,971
predictions = model.predict_classes(test, verbose=1)
submissions = pd.DataFrame({"ImageId": list(range(1, len(predictions) + 1)),
                            "Label": predictions})
submissions.to_csv("mnist2908.csv", index=False, header=True)<import_modules>
mask_count_df = train_df.groupby('ImageId').agg(np.sum).reset_index()
mask_count_df.sort_values('hasMask', ascending=False, inplace=True)
print(mask_count_df.shape)
mask_count_df.head()
Understanding Clouds from Satellite Images
5,644,971
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC<load_from_csv>
sub_df = pd.read_csv('../input/understanding_cloud_organization/sample_submission.csv')
sub_df['ImageId'] = sub_df['Image_Label'].apply(lambda x: x.split('_')[0])
test_imgs = pd.DataFrame(sub_df['ImageId'].unique(), columns=['ImageId'])
Understanding Clouds from Satellite Images
5,644,971
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')<split>
def np_resize(img, input_shape): height, width = input_shape return cv2.resize(img,(width, height)) def mask2rle(img): pixels= img.T.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return ' '.join(str(x)for x in runs) def rle2mask(rle, input_shape): width, height = input_shape[:2] mask= np.zeros(width*height ).astype(np.uint8) array = np.asarray([int(x)for x in rle.split() ]) starts = array[0::2] lengths = array[1::2] current_position = 0 for index, start in enumerate(starts): mask[int(start):int(start+lengths[index])] = 1 current_position += lengths[index] return mask.reshape(height, width ).T def build_masks(rles, input_shape, reshape=None): depth = len(rles) if reshape is None: masks = np.zeros(( *input_shape, depth)) else: masks = np.zeros(( *reshape, depth)) for i, rle in enumerate(rles): if type(rle)is str: if reshape is None: masks[:, :, i] = rle2mask(rle, input_shape) else: mask = rle2mask(rle, input_shape) reshaped_mask = np_resize(mask, reshape) masks[:, :, i] = reshaped_mask return masks def build_rles(masks, reshape=None): width, height, depth = masks.shape rles = [] for i in range(depth): mask = masks[:, :, i] if reshape: mask = mask.astype(np.float32) mask = np_resize(mask, reshape ).astype(np.int64) rle = mask2rle(mask) rles.append(rle) return rles
Understanding Clouds from Satellite Images