Dataset columns:
kernel_id: int64 (24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
6,338,895
kfolds=StratifiedKFold(n_splits=3) scores=[] svc=SVC(random_state=42) rdf=RandomForestClassifier(random_state=42) ada=AdaBoostClassifier(random_state=42) log=LogisticRegression(random_state=42) grb=GradientBoostingClassifier(random_state=42) dct=DecisionTreeClassifier(random_state=42) ext=ExtraTreesClassifier(random_state=42) xgb=XGBRFClassifier(random_state=42) xgbc=XGBClassifier(random_state=42) models=[svc,rdf,ada,log,grb,dct,ext,xgb,xgbc] for model in models: scores.append(cross_val_score(model,train,target,cv=kfolds,n_jobs=-1)) scores_mean=np.mean(scores,axis=1 )<train_on_grid>
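The cell above collects cross-validated scores but never pairs them with model names; a minimal hedged sketch of that follow-up, assuming the models and scores_mean objects defined above:
for model, mean_score in zip(models, scores_mean):
    print(f"{model.__class__.__name__}: mean CV score = {mean_score:.4f}")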
def rle_decode(mask_rle: str = '', shape: tuple =(1400, 2100)) : s = mask_rle.split() starts, lengths = [np.asarray(x, dtype=int)for x in(s[0:][::2], s[1:][::2])] starts -= 1 ends = starts + lengths img = np.zeros(shape[0] * shape[1], dtype=np.uint8) for lo, hi in zip(starts, ends): img[lo:hi] = 1 return img.reshape(shape, order='F' )
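A small hedged sanity check for rle_decode above, using a made-up RLE string: "1 3 10 2" marks pixels 1-3 and 10-11 (1-indexed, column-major), so the decoded toy mask should contain exactly five positive pixels.
import numpy as np
toy = rle_decode('1 3 10 2', shape=(4, 5))
assert toy.shape == (4, 5) and toy.sum() == 5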
Understanding Clouds from Satellite Images
6,338,895
def grid_search(estimator,param,X,y): grid=GridSearchCV(estimator, param, n_jobs=-1, cv=kfolds, return_train_score=True) grid.fit(X,y) grid_results=grid.cv_results_ print(' BestParams and Score : ',grid.best_params_,' ',grid.best_score_) return grid.best_estimator_ , grid, estimator<train_on_grid>
def np_resize(img, input_shape): height, width = input_shape return cv2.resize(img,(width, height)) def mask2rle(img): pixels= img.T.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return ' '.join(str(x)for x in runs) def rle2mask(rle, input_shape): width, height = input_shape[:2] mask= np.zeros(width*height ).astype(np.uint8) array = np.asarray([int(x)for x in rle.split() ]) starts = array[0::2] lengths = array[1::2] current_position = 0 for index, start in enumerate(starts): mask[int(start):int(start+lengths[index])] = 1 current_position += lengths[index] return mask.reshape(height, width ).T def build_masks(rles, input_shape, reshape=None): depth = len(rles) if reshape is None: masks = np.zeros(( *input_shape, depth)) else: masks = np.zeros(( *reshape, depth)) for i, rle in enumerate(rles): if type(rle)is str: if reshape is None: masks[:, :, i] = rle2mask(rle, input_shape) else: mask = rle2mask(rle, input_shape) reshaped_mask = np_resize(mask, reshape) masks[:, :, i] = reshaped_mask return masks def build_rles(masks, reshape=None): width, height, depth = masks.shape rles = [] for i in range(depth): mask = masks[:, :, i] if reshape: mask = mask.astype(np.float32) mask = np_resize(mask, reshape ).astype(np.int64) rle = mask2rle(mask) rles.append(rle) return rles
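A hedged usage sketch for mask2rle above: encoding a toy 4x6 mask whose ones sit in rows 1-2 of columns 2-3 should yield the column-major runs "10 2 14 2" (values worked out by hand for this toy example).
import numpy as np
toy_mask = np.zeros((4, 6), dtype=np.uint8)
toy_mask[1:3, 2:4] = 1
print(mask2rle(toy_mask))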
Understanding Clouds from Satellite Images
6,338,895
def random_grid_search(estimator,param_d,X,y,n): grid=RandomizedSearchCV(estimator, param_d, n_jobs=-1, cv=kfolds, return_train_score=True, n_iter=n) grid.fit(X,y) grid_results=grid.cv_results_ print(' BestParams and Score : ',grid.best_params_,' ',grid.best_score_) return grid.best_estimator_ , grid, estimator<define_search_space>
class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, list_IDs, df, target_df=None, mode='fit', base_path='/kaggle/input/understanding_cloud_organization/train_images', batch_size=32, dim=(1400, 2100), n_channels=3, reshape=None, augment=False, n_classes=4, random_state=2019, shuffle=True): self.dim = dim self.batch_size = batch_size self.df = df self.mode = mode self.base_path = base_path self.target_df = target_df self.list_IDs = list_IDs self.reshape = reshape self.n_channels = n_channels self.augment = augment self.n_classes = n_classes self.shuffle = shuffle self.random_state = random_state self.on_epoch_end() np.random.seed(self.random_state) def __len__(self): 'Denotes the number of batches per epoch' return int(np.floor(len(self.list_IDs)/ self.batch_size)) def __getitem__(self, index): 'Generate one batch of data' indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] list_IDs_batch = [self.list_IDs[k] for k in indexes] X = self.__generate_X(list_IDs_batch) if self.mode == 'fit': y = self.__generate_y(list_IDs_batch) if self.augment: X, y = self.__augment_batch(X, y) print(X,y) return X, y elif self.mode == 'predict': return X else: raise AttributeError('The mode parameter should be set to "fit" or "predict".') def on_epoch_end(self): 'Updates indexes after each epoch' self.indexes = np.arange(len(self.list_IDs)) if self.shuffle == True: np.random.seed(self.random_state) np.random.shuffle(self.indexes) def __generate_X(self, list_IDs_batch): 'Generates data containing batch_size samples' if self.reshape is None: X = np.empty(( self.batch_size, *self.dim, self.n_channels)) else: X = np.empty(( self.batch_size, *self.reshape, self.n_channels)) for i, ID in enumerate(list_IDs_batch): im_name = self.df['ImageId'].iloc[ID] img_path = f"{self.base_path}/{im_name}" img = self.__load_rgb(img_path) if self.reshape is not None: img = np_resize(img, self.reshape) X[i,] = img return X def __generate_y(self, list_IDs_batch): if self.reshape is None: y = np.empty(( self.batch_size, *self.dim, self.n_classes), dtype=int) else: y = np.empty(( self.batch_size, *self.reshape, self.n_classes), dtype=int) for i, ID in enumerate(list_IDs_batch): im_name = self.df['ImageId'].iloc[ID] image_df = self.target_df[self.target_df['ImageId'] == im_name] rles = image_df['EncodedPixels'].values if self.reshape is not None: masks = build_masks(rles, input_shape=self.dim, reshape=self.reshape) else: masks = build_masks(rles, input_shape=self.dim) y[i, ] = masks return y def __load_grayscale(self, img_path): img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) img = img.astype(np.float32)/ 255. img = np.expand_dims(img, axis=-1) return img def __load_rgb(self, img_path): img = cv2.imread(img_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img.astype(np.float32)/ 255. return img def __random_transform(self, img, masks): composition = albu.Compose([ albu.HorizontalFlip() , albu.VerticalFlip() , albu.ShiftScaleRotate(rotate_limit=45, shift_limit=0.15, scale_limit=0.15) ]) composed = composition(image=img, mask=masks) aug_img = composed['image'] aug_masks = composed['mask'] return aug_img, aug_masks def __augment_batch(self, img_batch, masks_batch): for i in range(img_batch.shape[0]): img_batch[i, ], masks_batch[i, ] = self.__random_transform( img_batch[i, ], masks_batch[i, ]) return img_batch, masks_batch
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'C':[0.6, 0.8,1,1.15,1.2,1.23,1.4], 'kernel':['rbf'], 'gamma':[0.1] } ] svc_best, svc_grid, svc=grid_search(SVC(random_state=42, probability=True),param_grid,X=train,y=target )<train_on_grid>
BATCH_SIZE = 4 train_idx, val_idx = train_test_split( mask_count_df.index, random_state=2019, test_size=0.2 ) train_generator = DataGenerator( train_idx, df=mask_count_df, target_df=train_csv, batch_size=BATCH_SIZE, reshape=(320, 480), augment=True, n_channels=3, n_classes=4 ) val_generator = DataGenerator( val_idx, df=mask_count_df, target_df=train_csv, batch_size=BATCH_SIZE, reshape=(320, 480), augment=False, n_channels=3, n_classes=4 )
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'C': uniform(0.9,0.3), 'kernel':['rbf'], 'gamma':[0.1] } ] svc_best, svc_grid, svc =random_grid_search(SVC(random_state=42, probability=True),param_grid,X=train,y=target,n=20 )<define_search_space>
!pip install -U git+https://github.com/qubvel/efficientnet
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'learning_rate':[0.1], 'n_estimators':[30,50,100,200,240], 'max_depth':[2,3,4] } ] gbc_best, gbc_grid, gbc =grid_search(GradientBoostingClassifier(random_state=42),param_grid,X=train,y=target )<train_on_grid>
!pip install segmentation-models --quiet
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'learning_rate':[0.1], 'n_estimators': randint(80,110), 'max_depth':[3] } ] gbc_best, gbc_grid, gbc =random_grid_search(GradientBoostingClassifier(random_state=42),param_grid,X=train,y=target,n=20 )<define_search_space>
from keras.layers import Dense from keras.models import Model from keras.optimizers import Adam, Nadam from keras.callbacks import Callback, ModelCheckpoint from keras.losses import binary_crossentropy import albumentations as albu import segmentation_models as sm
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'penalty':['l1','l2'],'C':[0.1,0.8,0.9,1,1.1,1.2,1.3,5], 'tol':[1e-4,1e-3],'solver':['liblinear']} ] log_best, log_grid, log =grid_search(LogisticRegression(random_state=42),param_grid,X=train,y=target )<train_on_grid>
def dice_coef(y_true, y_pred, smooth=1): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return(2.* intersection + smooth)/(K.sum(y_true_f)+ K.sum(y_pred_f)+ smooth) def dice_loss(y_true, y_pred): smooth = 1. y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = y_true_f * y_pred_f score =(2.* K.sum(intersection)+ smooth)/(K.sum(y_true_f)+ K.sum(y_pred_f)+ smooth) return 1.- score def bce_dice_loss(y_true, y_pred): return binary_crossentropy(y_true, y_pred)+ dice_loss(y_true, y_pred )
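A plain-numpy mirror of dice_coef above, useful as a hedged sanity check outside a Keras session: with smooth=1 the coefficient is exactly 1.0 for identical masks and close to 0 for disjoint ones.
import numpy as np
def dice_np(a, b, smooth=1.0):
    a, b = a.flatten(), b.flatten()
    intersection = (a * b).sum()
    return (2.0 * intersection + smooth) / (a.sum() + b.sum() + smooth)
m = np.ones((8, 8), dtype=np.float32)
print(dice_np(m, m), dice_np(m, np.zeros_like(m)))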
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'penalty':['l1'],'C': uniform(0.7,0.3), 'tol':[1e-4],'solver':['liblinear']} ] log_best, log_grid, log =random_grid_search(LogisticRegression(random_state=42),param_grid,X=train,y=target,n=20 )<train_on_grid>
model = sm.Unet( 'resnet18', classes=4, input_shape=(320, 480, 3), activation='sigmoid' ) model.compile(optimizer=Nadam(lr=0.0002), loss=bce_dice_loss, metrics=[dice_coef]) model.summary()
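The cells below load pre-trained weights rather than training from scratch; if training were run here, a typical hedged call using the generators defined earlier might look like this (epoch count, checkpoint filename, and callbacks are illustrative assumptions, not the author's settings):
history = model.fit_generator(
    generator=train_generator,
    validation_data=val_generator,
    epochs=10,
    callbacks=[ModelCheckpoint('unet_resnet18.h5', save_best_only=True)],
    verbose=1,
)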
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'n_estimators':randint(20,60),'max_depth':[4], 'gamma':[0.1]} ] xgb_best, xgb_grid, xgb =random_grid_search(XGBRFClassifier(random_state=42),param_grid,X=train,y=target,n=20 )<train_on_grid>
sub_df = pd.read_csv('/kaggle/input/understanding_cloud_organization/sample_submission.csv') sub_df['ImageId'] = sub_df['Image_Label'].apply(lambda x: x.split('_')[0]) test_imgs = pd.DataFrame(sub_df['ImageId'].unique() , columns=['ImageId'] )
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'n_estimators': randint(70,120),'max_depth':[3], 'gamma':[0.1]} ] xgbc_best, xgbc_grid, xgbc =random_grid_search(XGBClassifier(random_state=42),param_grid,X=train,y=target,n=20 )<define_search_space>
resnetpath = '/kaggle/input/resnet18/modelefficient.h5' model.load_weights(resnetpath) test_df = [] for i in range(0, test_imgs.shape[0], 500): batch_idx = list( range(i, min(test_imgs.shape[0], i + 500)) ) test_generator = DataGenerator( batch_idx, df=test_imgs, shuffle=False, mode='predict', dim=(350, 525), reshape=(320, 480), n_channels=3, base_path='/kaggle/input/understanding_cloud_organization/test_images', target_df=sub_df, batch_size=1, n_classes=4 ) batch_pred_masks = model.predict_generator( test_generator, workers=1, verbose=1 ) for j, b in enumerate(batch_idx): filename = test_imgs['ImageId'].iloc[b] image_df = sub_df[sub_df['ImageId'] == filename].copy() pred_masks = batch_pred_masks[j, ].round().astype(int) pred_rles = build_rles(pred_masks, reshape=(350, 525)) image_df['EncodedPixels'] = pred_rles test_df.append(image_df )
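The per-image frames accumulated in test_df are typically concatenated and written out as the submission; a hedged sketch of that step (the output filename is an assumption):
sub_final = pd.concat(test_df)
sub_final[['Image_Label', 'EncodedPixels']].to_csv('submission_unet_resnet18.csv', index=False)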
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'n_estimators': [30,50,100,200,300],'learning_rate':[0.9,1.0,1.1,1.2,1.3]} ] ada_best, ada_grid, ada =grid_search(AdaBoostClassifier(random_state=42),param_grid,X=train,y=target )<train_on_grid>
os.listdir('/kaggle/input/csvfiledl')
Understanding Clouds from Satellite Images
6,338,895
param_grid = [ {'n_estimators': randint(20,50),'learning_rate':[1.3]} ] ada_best, ada_grid, ada =random_grid_search(AdaBoostClassifier(random_state=42),param_grid,X=train,y=target,n=20 )<define_search_space>
df = pd.read_csv('/kaggle/input/csvfiledl/SubmissionDLearningFinal.csv' )
Understanding Clouds from Satellite Images
6,338,895
<train_on_grid><EOS>
df.to_csv('FinaldeepLearning.csv',index = False )
Understanding Clouds from Satellite Images
6,145,495
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<train_on_grid>
seed(10) set_random_seed(10) %matplotlib inline
Understanding Clouds from Satellite Images
6,145,495
param_grid = [ {'n_estimators': randint(80,150),'max_depth':[5], 'criterion':['gini']} ] ext_best, ext_grid, ext =random_grid_search(ExtraTreesClassifier(random_state=42),param_grid,X=train,y=target,n=20 )<train_on_grid>
!pip install keras-rectified-adam
Understanding Clouds from Satellite Images
6,145,495
models=[ ('svc', svc_best), ('log', log_best), ('xgb',xgb_best), ('gbc', gbc_best), ('xgbc', xgbc_best), ('ada', ada_best), ('rdf',rdf_best), ('ext',ext_best) ] vot_hard = VotingClassifier(estimators=models, voting='hard', n_jobs=-1) vot_hard.fit(train,target) vot_soft = VotingClassifier(estimators=models, voting='soft', n_jobs=-1) vot_soft.fit(train,target) stack=StackingClassifier(estimators=models,cv=kfolds,n_jobs=-1,stack_method='predict_proba') stack.fit(train,target) prediction_hard=vot_hard.predict(test) prediction_soft=vot_soft.predict(test) prediction_stack=stack.predict(test) <compute_train_metric>
test_imgs_folder = '../input/understanding_cloud_organization/test_images/' train_imgs_folder = '../input/understanding_cloud_organization/train_images/' num_cores = multiprocessing.cpu_count()
Understanding Clouds from Satellite Images
6,145,495
kfolds=StratifiedKFold(n_splits=3) scores=[] models=[vot_hard,vot_soft,stack] for model in models: scores.append(cross_val_score(model,train,target,cv=kfolds)) scores_mean=np.mean(scores,axis=1 )<save_to_csv>
train_df = pd.read_csv('../input/understanding_cloud_organization/train.csv') train_df.head()
Understanding Clouds from Satellite Images
6,145,495
Survived_hard = pd.Series(prediction_hard, name="Survived") result_hard = pd.concat([Id,Survived_hard],axis=1) result_hard.to_csv("submission_hard.csv",index=False) Survived_soft = pd.Series(prediction_soft, name="Survived") result_soft = pd.concat([Id,Survived_soft],axis=1) result_soft.to_csv("submission_soft.csv",index=False) Survived_stack = pd.Series(prediction_stack, name="Survived") result_stack = pd.concat([Id,Survived_stack],axis=1) result_stack.to_csv("submission_stack.csv",index=False )<load_from_csv>
train_df = train_df[~train_df['EncodedPixels'].isnull() ] train_df['Image'] = train_df['Image_Label'].map(lambda x: x.split('_')[0]) train_df['Class'] = train_df['Image_Label'].map(lambda x: x.split('_')[1]) classes = train_df['Class'].unique() train_df = train_df.groupby('Image')['Class'].agg(set ).reset_index() for class_name in classes: train_df[class_name] = train_df['Class'].map(lambda x: 1 if class_name in x else 0) train_df.head()
Understanding Clouds from Satellite Images
6,145,495
train = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv')<prepare_x_and_y>
img_2_ohe_vector = {img:vec for img, vec in zip(train_df['Image'], train_df.iloc[:, 2:].values)}
Understanding Clouds from Satellite Images
6,145,495
train_x = train.drop(['Survived'], axis=1) train_y = train['Survived']<create_dataframe>
train_imgs, val_imgs = train_test_split(train_df['Image'].values, test_size=0.17, stratify=train_df['Class'].map(lambda x: str(sorted(list(x)))) , random_state=2019 )
Understanding Clouds from Satellite Images
6,145,495
test_x = test.copy()<import_modules>
class DataGenenerator(Sequence): def __init__(self, images_list=None, folder_imgs=train_imgs_folder, batch_size=16, shuffle=True, augmentation=None, resized_height=256, resized_width=256, num_channels=3): self.batch_size = batch_size self.shuffle = shuffle self.augmentation = augmentation if images_list is None: self.images_list = os.listdir(folder_imgs) else: self.images_list = deepcopy(images_list) self.folder_imgs = folder_imgs self.len = len(self.images_list)// self.batch_size self.resized_height = resized_height self.resized_width = resized_width self.num_channels = num_channels self.num_classes = 4 self.is_test = not 'train' in folder_imgs if not shuffle and not self.is_test: self.labels = [img_2_ohe_vector[img] for img in self.images_list[:self.len*self.batch_size]] def __len__(self): return self.len def on_epoch_start(self): if self.shuffle: random.shuffle(self.images_list) def __getitem__(self, idx): current_batch = self.images_list[idx * self.batch_size:(idx + 1)* self.batch_size] X = np.empty(( self.batch_size, self.resized_height, self.resized_width, self.num_channels)) y = np.empty(( self.batch_size, self.num_classes)) for i, image_name in enumerate(current_batch): path = os.path.join(self.folder_imgs, image_name) img = cv2.resize(cv2.imread(path),(self.resized_height, self.resized_width)).astype(np.float32) if not self.augmentation is None: augmented = self.augmentation(image=img) img = augmented['image'] X[i, :, :, :] = img/255.0 if not self.is_test: y[i, :] = img_2_ohe_vector[image_name] return X, y def get_labels(self): if self.shuffle: images_current = self.images_list[:self.len*self.batch_size] labels = [img_2_ohe_vector[img] for img in images_current] else: labels = self.labels return np.array(labels )
Understanding Clouds from Satellite Images
6,145,495
from sklearn.preprocessing import LabelEncoder<drop_column>
albumentations_train = Compose([ VerticalFlip() , HorizontalFlip() , Rotate(limit=10), GridDistortion() ], p=1 )
Understanding Clouds from Satellite Images
6,145,495
train_x = train_x.drop(['PassengerId'], axis=1) test_x = test_x.drop(['PassengerId'], axis=1 )<drop_column>
data_generator_train = DataGenenerator(train_imgs, augmentation=albumentations_train) data_generator_train_eval = DataGenenerator(train_imgs, shuffle=False) data_generator_val = DataGenenerator(val_imgs, shuffle=False )
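A quick hedged shape check on the generators instantiated above: indexing one batch should return images of shape (16, 256, 256, 3) and one-hot labels of shape (16, 4), matching the DataGenenerator defaults.
xb, yb = data_generator_train[0]
print(xb.shape, yb.shape)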
Understanding Clouds from Satellite Images
6,145,495
train_x = train_x.drop(['Name', 'Ticket', 'Cabin'], axis=1) test_x = test_x.drop(['Name', 'Ticket', 'Cabin'], axis=1 )<categorify>
class PrAucCallback(Callback): def __init__(self, data_generator, num_workers=num_cores, early_stopping_patience=5, plateau_patience=3, reduction_rate=0.5, stage='train', checkpoints_path='checkpoints/'): super(Callback, self ).__init__() self.data_generator = data_generator self.num_workers = num_workers self.class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] self.history = [[] for _ in range(len(self.class_names)+ 1)] self.early_stopping_patience = early_stopping_patience self.plateau_patience = plateau_patience self.reduction_rate = reduction_rate self.stage = stage self.best_pr_auc = -float('inf') if not os.path.exists(checkpoints_path): os.makedirs(checkpoints_path) self.checkpoints_path = checkpoints_path def compute_pr_auc(self, y_true, y_pred): pr_auc_mean = 0 print(f" {' ") for class_i in range(len(self.class_names)) : precision, recall, _ = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) pr_auc = auc(recall, precision) pr_auc_mean += pr_auc/len(self.class_names) print(f"PR AUC {self.class_names[class_i]}, {self.stage}: {pr_auc:.3f} ") self.history[class_i].append(pr_auc) print(f" {' PR AUC mean, {self.stage}: {pr_auc_mean:.3f} {' ") self.history[-1].append(pr_auc_mean) return pr_auc_mean def is_patience_lost(self, patience): if len(self.history[-1])> patience: best_performance = max(self.history[-1][-(patience + 1):-1]) return best_performance == self.history[-1][-(patience + 1)] and best_performance >= self.history[-1][-1] def early_stopping_check(self, pr_auc_mean): if self.is_patience_lost(self.early_stopping_patience): self.model.stop_training = True def model_checkpoint(self, pr_auc_mean, epoch): if pr_auc_mean > self.best_pr_auc: for checkpoint in glob.glob(os.path.join(self.checkpoints_path, 'classifier_densenet169_epoch_*')) : os.remove(checkpoint) self.best_pr_auc = pr_auc_mean self.model.save(os.path.join(self.checkpoints_path, f'classifier_densenet169_epoch_{epoch}_val_pr_auc_{pr_auc_mean}.h5')) print(f" {' Saved new checkpoint {' ") def reduce_lr_on_plateau(self): if self.is_patience_lost(self.plateau_patience): new_lr = float(keras.backend.get_value(self.model.optimizer.lr)) * self.reduction_rate keras.backend.set_value(self.model.optimizer.lr, new_lr) print(f" {' Reduced learning rate to {new_lr}. {' ") def on_epoch_end(self, epoch, logs={}): y_pred = self.model.predict_generator(self.data_generator, workers=self.num_workers) y_true = self.data_generator.get_labels() pr_auc_mean = self.compute_pr_auc(y_true, y_pred) if self.stage == 'val': self.early_stopping_check(pr_auc_mean) self.model_checkpoint(pr_auc_mean, epoch) self.reduce_lr_on_plateau() def get_pr_auc_history(self): return self.history
Understanding Clouds from Satellite Images
6,145,495
for c in ['Sex', 'Embarked']: le = LabelEncoder() le.fit(train_x[c].fillna('NA')) train_x[c] = le.transform(train_x[c].fillna('NA')) test_x[c] = le.transform(test_x[c].fillna('NA'))<import_modules>
train_metric_callback = PrAucCallback(data_generator_train_eval) val_callback = PrAucCallback(data_generator_val, stage='val' )
Understanding Clouds from Satellite Images
6,145,495
from xgboost import XGBClassifier<train_model>
def dice_coef(y_true, y_pred, smooth=1): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return(2.* intersection + smooth)/(K.sum(y_true_f)+ K.sum(y_pred_f)+ smooth) def dice_loss(y_true, y_pred): smooth = 1. y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = y_true_f * y_pred_f score =(2.* K.sum(intersection)+ smooth)/(K.sum(y_true_f)+ K.sum(y_pred_f)+ smooth) return 1.- score def bce_dice_loss(y_true, y_pred): return binary_crossentropy(y_true, y_pred)+ dice_loss(y_true, y_pred )
Understanding Clouds from Satellite Images
6,145,495
model = XGBClassifier(n_estimators=20, random_state=71) model.fit(train_x, train_y )<predict_on_test>
!pip install -U git+https://github.com/qubvel/efficientnet
Understanding Clouds from Satellite Images
6,145,495
pred = model.predict_proba(test_x)[:, 1]<define_variables>
def get_model() : K.clear_session() base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, pooling='avg', input_shape=(256, 256, 3)) x = base_model.output y_pred = Dense(4, activation='sigmoid' )(x) return Model(inputs=base_model.input, outputs=y_pred) model = get_model()
Understanding Clouds from Satellite Images
6,145,495
pred_label = np.where(pred > 0.5, 1, 0 )<save_to_csv>
from keras_radam import RAdam
Understanding Clouds from Satellite Images
6,145,495
submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': pred_label}) submission.to_csv('submission_first.csv', index=False )<import_modules>
for base_layer in model.layers[:-5]: base_layer.trainable = False model.compile(optimizer=RAdam(warmup_proportion=0.1, min_lr=1e-5), loss=bce_dice_loss) history_0 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=30, callbacks=[train_metric_callback, val_callback], workers=num_cores, verbose=1 )
Understanding Clouds from Satellite Images
6,145,495
from sklearn.metrics import log_loss, accuracy_score from sklearn.model_selection import KFold<define_variables>
for base_layer in model.layers[:-1]: base_layer.trainable = True model.compile(optimizer=RAdam(warmup_proportion=0.1, min_lr=1e-5), loss=bce_dice_loss) history_1 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=20, callbacks=[train_metric_callback, val_callback], workers=num_cores, verbose=1, initial_epoch=1 )
Understanding Clouds from Satellite Images
6,145,495
scores_accuracy = [] scores_logloss = []<train_model>
class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] def get_threshold_for_recall(y_true, y_pred, class_i, recall_threshold=0.94, precision_threshold=0.94, plot=False): precision, recall, thresholds = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) i = len(thresholds)- 1 best_recall_threshold = None while best_recall_threshold is None: next_threshold = thresholds[i] next_recall = recall[i] if next_recall >= recall_threshold: best_recall_threshold = next_threshold i -= 1 best_precision_threshold = [thres for prec, thres in zip(precision, thresholds)if prec >= precision_threshold][0] if plot: plt.figure(figsize=(10, 7)) plt.step(recall, precision, color='r', alpha=0.3, where='post') plt.fill_between(recall, precision, alpha=0.3, color='r') plt.axhline(y=precision[i + 1]) recall_for_prec_thres = [rec for rec, thres in zip(recall, thresholds) if thres == best_precision_threshold][0] plt.axvline(x=recall_for_prec_thres, color='g') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.legend(['PR curve', f'Precision {precision[i + 1]:.2f} corresponding to selected recall threshold', f'Recall {recall_for_prec_thres:.2f} corresponding to selected precision threshold']) plt.title(f'Precision-Recall curve for Class {class_names[class_i]}') return best_recall_threshold, best_precision_threshold y_pred = model.predict_generator(data_generator_val, workers=num_cores) y_true = data_generator_val.get_labels() recall_thresholds = dict() precision_thresholds = dict() for i, class_name in tqdm(enumerate(class_names)) : recall_thresholds[class_name], precision_thresholds[class_name] = get_threshold_for_recall(y_true, y_pred, i, plot=True )
Understanding Clouds from Satellite Images
6,145,495
kf = KFold(n_splits=4, shuffle=True, random_state=71) for tr_idx, va_idx in kf.split(train_x): tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx] tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx] model = XGBClassifier(n_estimators=20, random_state=71) model.fit(tr_x, tr_y) va_pred = model.predict_proba(va_x)[:, 1] logloss = log_loss(va_y, va_pred) accuracy = accuracy_score(va_y, va_pred > 0.5) scores_logloss.append(logloss) scores_accuracy.append(accuracy )<compute_test_metric>
data_generator_test = DataGenenerator(folder_imgs=test_imgs_folder, shuffle=False) y_pred_test = model.predict_generator(data_generator_test, workers=num_cores )
Understanding Clouds from Satellite Images
6,145,495
logloss = np.mean(scores_logloss) accuracy = np.mean(scores_accuracy) print(f'logloss: {logloss:.4f}, accuracy: {accuracy:.4f}' )<import_modules>
image_labels_empty = set() for i,(img, predictions)in enumerate(zip(os.listdir(test_imgs_folder), y_pred_test)) : for class_i, class_name in enumerate(class_names): if predictions[class_i] < recall_thresholds[class_name]: image_labels_empty.add(f'{img}_{class_name}' )
Understanding Clouds from Satellite Images
6,145,495
import itertools<define_search_space>
submission = pd.read_csv('../input/efficient-net-b4-unet-clouds/submission.csv') submission.head()
Understanding Clouds from Satellite Images
6,145,495
param_space = { 'max_depth': [3, 5, 7], 'min_child_weight': [1.0, 2.0, 4.0] }<concatenate>
predictions_nonempty = set(submission.loc[~submission['EncodedPixels'].isnull() , 'Image_Label'].values )
Understanding Clouds from Satellite Images
6,145,495
param_combinations = itertools.product(param_space['max_depth'], param_space['min_child_weight'] )<define_variables>
print(f'{len(image_labels_empty.intersection(predictions_nonempty)) } masks would be removed' )
Understanding Clouds from Satellite Images
6,145,495
params = [] scores = []<train_model>
submission.loc[submission['Image_Label'].isin(image_labels_empty), 'EncodedPixels'] = np.nan submission.to_csv('submission_segmentation_and_classifier_efficientnetb4_16btch_size.csv', index=None )
Understanding Clouds from Satellite Images
5,581,710
for max_depth, min_child_weight in param_combinations: score_folds = [] kf = KFold(n_splits=4, shuffle=True, random_state=123456) for tr_idx, va_idx in kf.split(train_x): tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx] tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx] model = XGBClassifier(n_estimators=20, random_state=71, max_depth=max_depth, min_child_weight=min_child_weight) model.fit(tr_x, tr_y) va_pred = model.predict_proba(va_x)[:, 1] logloss = log_loss(va_y, va_pred) score_folds.append(logloss) score_mean = np.mean(score_folds) params.append(( max_depth, min_child_weight)) scores.append(score_mean )<find_best_params>
!pip install catalyst !pip install pretrainedmodels !pip install git+https://github.com/qubvel/segmentation_models.pytorch !pip install pytorch_toolbelt !pip install torchvision==0.4 !pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ../input/apex-325f5a0/apex-master/
Understanding Clouds from Satellite Images
5,581,710
best_idx = np.argsort(scores)[0] best_param = params[best_idx] print(f'max_depth: {best_param[0]}, min_child_weight: {best_param[1]}' )<import_modules>
train_on_gpu = True %matplotlib inline device=torch.device('cuda' )
Understanding Clouds from Satellite Images
5,581,710
from sklearn.preprocessing import OneHotEncoder<drop_column>
path = '../input/understanding_cloud_organization' img_paths = '../input/understanding-clouds-resized' os.listdir(path)
Understanding Clouds from Satellite Images
5,581,710
train_x2 = train.drop(['Survived'], axis=1) test_x2 = test.copy()<drop_column>
train = pd.read_csv(f'{path}/train.csv') sub = pd.read_csv(f'{path}/sample_submission.csv' )
Understanding Clouds from Satellite Images
5,581,710
train_x2 = train_x2.drop(['PassengerId'], axis=1) test_x2 = test_x2.drop(['PassengerId'], axis=1 )<drop_column>
n_train = len(os.listdir(f'{img_paths}/train_images_525/train_images_525')) n_test = len(os.listdir(f'{img_paths}/test_images_525/test_images_525')) print(f'There are {n_train} images in train dataset') print(f'There are {n_test} images in test dataset' )
Understanding Clouds from Satellite Images
5,581,710
train_x2 = train_x2.drop(['Name', 'Ticket', 'Cabin'], axis=1) test_x2 = test_x2.drop(['Name', 'Ticket', 'Cabin'], axis=1 )<categorify>
train['Image_Label'].apply(lambda x: x.split('_')[1] ).value_counts()
Understanding Clouds from Satellite Images
5,581,710
cat_cols = ['Sex', 'Embarked', 'Pclass'] ohe = OneHotEncoder(categories='auto', sparse=False) ohe.fit(train_x2[cat_cols].fillna('NA'))<define_variables>
train.loc[train['EncodedPixels'].isnull() == False, 'Image_Label'].apply(lambda x: x.split('_')[1] ).value_counts()
Understanding Clouds from Satellite Images
5,581,710
ohe_columns = [] for i, c in enumerate(cat_cols): ohe_columns += [f'{c}_{v}' for v in ohe.categories_[i]]<create_dataframe>
train.loc[train['EncodedPixels'].isnull() == False, 'Image_Label'].apply(lambda x: x.split('_')[0] ).value_counts().value_counts()
Understanding Clouds from Satellite Images
5,581,710
ohe_train_x2 = pd.DataFrame(ohe.transform(train_x2[cat_cols].fillna('NA')) , columns=ohe_columns) ohe_test_x2 = pd.DataFrame(ohe.transform(test_x2[cat_cols].fillna('NA')) , columns=ohe_columns )<drop_column>
train['label'] = train['Image_Label'].apply(lambda x: x.split('_')[1]) train['im_id'] = train['Image_Label'].apply(lambda x: x.split('_')[0]) sub['label'] = sub['Image_Label'].apply(lambda x: x.split('_')[1]) sub['im_id'] = sub['Image_Label'].apply(lambda x: x.split('_')[0] )
Understanding Clouds from Satellite Images
5,581,710
train_x2 = train_x2.drop(cat_cols, axis=1) test_x2 = test_x2.drop(cat_cols, axis=1 )<concatenate>
id_mask_count = train.loc[train['EncodedPixels'].isnull() == False, 'Image_Label'].apply(lambda x: x.split('_')[0] ).value_counts().\ reset_index().rename(columns={'index': 'img_id', 'Image_Label': 'count'}) train_ids, valid_ids = train_test_split(id_mask_count['img_id'].values, random_state=42, stratify=id_mask_count['count'], test_size=0.1) test_ids = sub['Image_Label'].apply(lambda x: x.split('_')[0] ).drop_duplicates().values
Understanding Clouds from Satellite Images
5,581,710
train_x2 = pd.concat([train_x2, ohe_train_x2], axis=1) test_x2 = pd.concat([test_x2, ohe_test_x2], axis=1 )<data_type_conversions>
image_name = '8242ba0.jpg' image = get_img(image_name) mask = make_mask(train, image_name )
Understanding Clouds from Satellite Images
5,581,710
num_cols = ['Age', 'SibSp', 'Parch', 'Fare'] for col in num_cols: train_x2[col].fillna(train_x2[col].mean() , inplace=True) test_x2[col].fillna(train_x2[col].mean() , inplace=True )<feature_engineering>
class CloudDataset(Dataset): def __init__(self, df: pd.DataFrame = None, datatype: str = 'train', img_ids: np.array = None, transforms = albu.Compose([albu.HorizontalFlip() ,AT.ToTensor() ]), preprocessing=None): self.df = df if datatype != 'test': self.data_folder = f"{img_paths}/train_images_525/train_images_525" else: self.data_folder = f"{img_paths}/test_images_525/test_images_525" self.img_ids = img_ids self.transforms = transforms self.preprocessing = preprocessing def __getitem__(self, idx): image_name = self.img_ids[idx] mask = make_mask(self.df, image_name) image_path = os.path.join(self.data_folder, image_name) img = cv2.imread(image_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) augmented = self.transforms(image=img, mask=mask) img = augmented['image'] mask = augmented['mask'] if self.preprocessing: preprocessed = self.preprocessing(image=img, mask=mask) img = preprocessed['image'] mask = preprocessed['mask'] return img, mask def __len__(self): return len(self.img_ids )
Understanding Clouds from Satellite Images
5,581,710
train_x2['Fare'] = np.log1p(train_x2['Fare']) test_x2['Fare'] = np.log1p(test_x2['Fare'] )<import_modules>
ENCODER = 'resnet18' ENCODER_WEIGHTS = 'imagenet' ACTIVATION = None model = smp.Unet( encoder_name=ENCODER, encoder_weights=ENCODER_WEIGHTS, classes=4, activation=ACTIVATION, ) preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS )
Understanding Clouds from Satellite Images
5,581,710
from sklearn.linear_model import LogisticRegression<train_model>
num_workers = 4 bs = 32 train_dataset = CloudDataset(df=train, datatype='train', img_ids=train_ids, transforms = get_training_augmentation() , preprocessing=get_preprocessing(preprocessing_fn)) valid_dataset = CloudDataset(df=train, datatype='valid', img_ids=valid_ids, transforms = get_validation_augmentation() , preprocessing=get_preprocessing(preprocessing_fn)) train_loader = DataLoader(train_dataset, batch_size=bs, shuffle=True, num_workers=num_workers) valid_loader = DataLoader(valid_dataset, batch_size=bs, shuffle=False, num_workers=num_workers) loaders = { "train": train_loader, "valid": valid_loader }
Understanding Clouds from Satellite Images
5,581,710
model_xgb = XGBClassifier(n_estimators=20, random_state=71) model_xgb.fit(train_x, train_y) pred_xgb = model_xgb.predict_proba(test_x)[:, 1]<train_model>
def dice_loss(input, target): input = torch.sigmoid(input) smooth = 1.0 iflat = input.view(-1) tflat = target.view(-1) intersection =(iflat * tflat ).sum() return(( 2.0 * intersection + smooth)/(iflat.sum() + tflat.sum() + smooth)) class FocalLoss(nn.Module): def __init__(self, gamma): super().__init__() self.gamma = gamma def forward(self, input, target): if not(target.size() == input.size()): raise ValueError("Target size({})must be the same as input size({})" .format(target.size() , input.size())) max_val =(-input ).clamp(min=0) loss = input - input * target + max_val + \ (( -max_val ).exp() +(-input - max_val ).exp() ).log() invprobs = F.logsigmoid(-input *(target * 2.0 - 1.0)) loss =(invprobs * self.gamma ).exp() * loss return loss.mean() class MixedLoss(nn.Module): def __init__(self, alpha, gamma): super().__init__() self.alpha = alpha self.focal = FocalLoss(gamma) def forward(self, input, target): loss = self.alpha*self.focal(input, target)- torch.log(dice_loss(input, target)) return loss.mean()
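A hedged smoke test for MixedLoss above: random logits and a random binary target of matching shape should yield a finite scalar loss (the shapes here are illustrative only).
import torch
logits = torch.randn(2, 4, 32, 32)
target = torch.randint(0, 2, (2, 4, 32, 32)).float()
print(MixedLoss(alpha=10.0, gamma=2.0)(logits, target).item())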
Understanding Clouds from Satellite Images
5,581,710
model_lr = LogisticRegression(solver='lbfgs', max_iter=300) model_lr.fit(train_x2, train_y) pred_lr = model_lr.predict_proba(test_x2)[:, 1]<define_variables>
num_epochs = 25 logdir = "./logs/segmentation_unet" optimizer = torch.optim.Adam([ {'params': model.decoder.parameters() , 'lr': 1e-2}, {'params': model.encoder.parameters() , 'lr': 1e-3}, ]) opt_level = 'O1' model.cuda() model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level) scheduler = ReduceLROnPlateau(optimizer, factor=0.5, patience=2) criterion = smp.utils.losses.BCEDiceLoss(eps=1.) runner = SupervisedRunner()
Understanding Clouds from Satellite Images
5,581,710
pred = pred_xgb * 0.8 + pred_lr * 0.2 pred_label = np.where(pred > 0.5, 1, 0 )<save_to_csv>
runner.train( model=model, criterion=criterion, optimizer=optimizer, scheduler=scheduler, loaders=loaders, callbacks=[DiceCallback() , EarlyStoppingCallback(patience=5, min_delta=0.001)], logdir=logdir, num_epochs=num_epochs, verbose=True )
Understanding Clouds from Satellite Images
5,581,710
submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': pred_label}) submission.to_csv('submission_ensemble.csv', index=False)<set_options>
encoded_pixels = [] loaders = {"infer": valid_loader} runner.infer( model=model, loaders=loaders, callbacks=[ CheckpointCallback( resume=f"{logdir}/checkpoints/best.pth"), InferCallback() ], ) valid_masks = [] probabilities = np.zeros(( 2220, 350, 525), dtype = np.float32) for i,(batch, output)in enumerate(tqdm.tqdm(zip( valid_dataset, runner.callbacks[0].predictions["logits"]))): image, mask = batch for m in mask: if m.shape !=(350, 525): m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR) valid_masks.append(m) for j, probability in enumerate(output): if probability.shape !=(350, 525): probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR) probabilities[i * 4 + j, :, :] = probability
Understanding Clouds from Satellite Images
5,581,710
%matplotlib inline <load_from_csv>
torch.cuda.empty_cache() gc.collect()
Understanding Clouds from Satellite Images
5,581,710
train_df = pd.read_csv('../input/train.csv') test_df = pd.read_csv('../input/test.csv') combine = [train_df, test_df]<sort_values>
class_params = {} for class_id in range(4): print(class_id) attempts = [] for t in range(0, 100, 5): t /= 100 for ms in [5000, 10000, 15000, 20000, 22500, 25000]: masks = [] for i in range(class_id, len(probabilities), 4): probability = probabilities[i] predict, num_predict = post_process(sigmoid(probability), t, ms) masks.append(predict) d = [] for i, j in zip(masks, valid_masks[class_id::4]): if(i.sum() == 0)&(j.sum() == 0): d.append(1) else: d.append(dice(i, j)) attempts.append(( t, ms, np.mean(d))) attempts_df = pd.DataFrame(attempts, columns=['threshold', 'size', 'dice']) attempts_df = attempts_df.sort_values('dice', ascending=False) print(attempts_df.head()) best_threshold = attempts_df['threshold'].values[0] best_size = attempts_df['size'].values[0] class_params[class_id] =(best_threshold, best_size )
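post_process, sigmoid, and dice are used above and below but are not defined anywhere in this excerpt. A minimal sketch of the conventional implementations, assuming the usual approach of thresholding the sigmoid output and dropping connected components smaller than min_size, with dice as the plain binary overlap score:
import cv2
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def post_process(probability, threshold, min_size):
    # Threshold the probability map, then keep only components larger than min_size.
    mask = (probability > threshold).astype(np.uint8)
    num_component, component = cv2.connectedComponents(mask)
    predictions = np.zeros((350, 525), np.float32)
    num = 0
    for c in range(1, num_component):
        p = (component == c)
        if p.sum() > min_size:
            predictions[p] = 1
            num += 1
    return predictions, num

def dice(img1, img2):
    # Binary Dice overlap between a predicted mask and a ground-truth mask.
    img1 = np.asarray(img1).astype(bool)
    img2 = np.asarray(img2).astype(bool)
    intersection = np.logical_and(img1, img2)
    return 2.0 * intersection.sum() / (img1.sum() + img2.sum())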
Understanding Clouds from Satellite Images
5,581,710
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False )<sort_values>
del masks del valid_masks del probabilities gc.collect()
Understanding Clouds from Satellite Images
5,581,710
train_df[["Sex", "Survived"]].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False )<sort_values>
print(class_params )
Understanding Clouds from Satellite Images
5,581,710
train_df[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False ).mean().sort_values(by='Survived', ascending=False )<sort_values>
attempts_df = pd.DataFrame(attempts, columns=['threshold', 'size', 'dice'] )
Understanding Clouds from Satellite Images
5,581,710
train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False ).mean().sort_values(by='Survived', ascending=False )<feature_engineering>
attempts_df = attempts_df.sort_values('dice', ascending=False) attempts_df.head(10 )
Understanding Clouds from Satellite Images
5,581,710
for dataset in combine: dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False) pd.crosstab(train_df['Title'], train_df['Sex'] )<feature_engineering>
best_threshold = attempts_df['threshold'].values[0] best_size = attempts_df['size'].values[0]
Understanding Clouds from Satellite Images
5,581,710
for dataset in combine: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Jonkheer', 'Dona'], 'Lady') dataset['Title'] = dataset['Title'].replace(['Capt', 'Don', 'Major', 'Sir'], 'Sir') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') train_df[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()<categorify>
for i,(input, output)in enumerate(zip( valid_dataset, runner.callbacks[0].predictions["logits"])) : image, mask = input image_vis = image.transpose(1, 2, 0) mask = mask.astype('uint8' ).transpose(1, 2, 0) pr_mask = np.zeros(( 350, 525, 4)) for j in range(4): probability = cv2.resize(output[:, :, j], dsize=(525, 350), interpolation=cv2.INTER_LINEAR) pr_mask[:, :, j], _ = post_process(sigmoid(probability), class_params[j][0], class_params[j][1]) visualize_with_raw(image=image_vis, mask=pr_mask, original_image=image_vis, original_mask=mask, raw_image=image_vis, raw_mask=output.transpose(1, 2, 0)) if i >= 2: break
Understanding Clouds from Satellite Images
5,581,710
title_mapping = {"Col": 1, "Dr": 2, "Lady": 3, "Master": 4, "Miss": 5, "Mr": 6, "Mrs": 7, "Rev": 8, "Sir": 9} for dataset in combine: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) train_df.head()<drop_column>
torch.cuda.empty_cache() gc.collect()
Understanding Clouds from Satellite Images
5,581,710
train_df = train_df.drop(['Name', 'PassengerId'], axis=1) test_df = test_df.drop(['Name'], axis=1) combine = [train_df, test_df] train_df.shape, test_df.shape<data_type_conversions>
test_dataset = CloudDataset(df=sub, datatype='test', img_ids=test_ids, transforms = get_validation_augmentation() , preprocessing=get_preprocessing(preprocessing_fn)) test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False, num_workers=0) loaders = {"test": test_loader}
Understanding Clouds from Satellite Images
5,581,710
for dataset in combine: dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0} ).astype(int) train_df.head()<define_variables>
del train_dataset, train_loader
Understanding Clouds from Satellite Images
5,581,710
guess_ages = np.zeros(( 2,3)) guess_ages<find_best_params>
del valid_dataset, valid_loader gc.collect()
Understanding Clouds from Satellite Images
5,581,710
for dataset in combine: for i in range(0, 2): for j in range(0, 3): guess_df = dataset[(dataset['Sex'] == i)& \ (dataset['Pclass'] == j+1)]['Age'].dropna() age_guess = guess_df.median() guess_ages[i,j] = int(age_guess/0.5 + 0.5)* 0.5 for i in range(0, 2): for j in range(0, 3): dataset.loc[(dataset.Age.isnull())&(dataset.Sex == i)&(dataset.Pclass == j+1),\ 'Age'] = guess_ages[i,j] dataset['Age'] = dataset['Age'].astype(int) train_df.head()<sort_values>
encoded_pixels = [] image_id = 0 for i, test_batch in enumerate(tqdm.tqdm(loaders['test'])) : runner_out = runner.predict_batch({"features": test_batch[0].cuda() })['logits'] for i, batch in enumerate(runner_out): for probability in batch: probability = probability.cpu().detach().numpy() if probability.shape !=(350, 525): probability = cv2.resize(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR) predict, num_predict = post_process(sigmoid(probability), class_params[image_id % 4][0], class_params[image_id % 4][1]) if num_predict == 0: encoded_pixels.append('') else: r = mask2rle(predict) encoded_pixels.append(r) image_id += 1
Understanding Clouds from Satellite Images
5,581,710
<feature_engineering><EOS>
sub['EncodedPixels'] = encoded_pixels sub.to_csv('submission.csv', columns=['Image_Label', 'EncodedPixels'], index=False )
Understanding Clouds from Satellite Images
5,715,541
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<drop_column>
seed(10) set_random_seed(10) %matplotlib inline
Understanding Clouds from Satellite Images
5,715,541
train_df = train_df.drop(['AgeBand'], axis=1) combine = [train_df, test_df] train_df.head()<sort_values>
test_imgs_folder = '../input/understanding_cloud_organization/test_images/' train_imgs_folder = '../input/understanding_cloud_organization/train_images/' num_cores = multiprocessing.cpu_count()
Understanding Clouds from Satellite Images
5,715,541
for dataset in combine: dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1 train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False ).mean().sort_values(by='Survived', ascending=False )<feature_engineering>
train_df = pd.read_csv('../input/understanding_cloud_organization/train.csv') train_df.head()
Understanding Clouds from Satellite Images
5,715,541
for dataset in combine: dataset['IsAlone'] = 0 dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1 train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False ).mean()<drop_column>
train_df = train_df[~train_df['EncodedPixels'].isnull() ] train_df['Image'] = train_df['Image_Label'].map(lambda x: x.split('_')[0]) train_df['Class'] = train_df['Image_Label'].map(lambda x: x.split('_')[1]) classes = train_df['Class'].unique() train_df = train_df.groupby('Image')['Class'].agg(set ).reset_index() for class_name in classes: train_df[class_name] = train_df['Class'].map(lambda x: 1 if class_name in x else 0) train_df.head()
Understanding Clouds from Satellite Images
5,715,541
train_df = train_df.drop(['Parch',], axis=1) test_df = test_df.drop(['Parch'], axis=1) combine = [train_df, test_df] train_df.head()<feature_engineering>
img_2_ohe_vector = {img:vec for img, vec in zip(train_df['Image'], train_df.iloc[:, 2:].values)}
Understanding Clouds from Satellite Images
5,715,541
for dataset in combine: dataset['Age*Class'] = dataset.Age * dataset.Pclass train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10 )<correct_missing_values>
train_imgs, val_imgs = train_test_split(train_df['Image'].values, test_size=0.2, stratify=train_df['Class'].map(lambda x: str(sorted(list(x)))) , random_state=10 )
Understanding Clouds from Satellite Images
5,715,541
freq_port = train_df.Embarked.dropna().mode() [0] freq_port<sort_values>
class DataGenenerator(Sequence): def __init__(self, images_list=None, folder_imgs=train_imgs_folder, batch_size=32, shuffle=True, augmentation=None, resized_height=224, resized_width=224, num_channels=3): self.batch_size = batch_size self.shuffle = shuffle self.augmentation = augmentation if images_list is None: self.images_list = os.listdir(folder_imgs) else: self.images_list = deepcopy(images_list) self.folder_imgs = folder_imgs self.len = len(self.images_list)// self.batch_size self.resized_height = resized_height self.resized_width = resized_width self.num_channels = num_channels self.num_classes = 4 self.is_test = not 'train' in folder_imgs if not shuffle and not self.is_test: self.labels = [img_2_ohe_vector[img] for img in self.images_list[:self.len*self.batch_size]] def __len__(self): return self.len def on_epoch_start(self): if self.shuffle: random.shuffle(self.images_list) def __getitem__(self, idx): current_batch = self.images_list[idx * self.batch_size:(idx + 1)* self.batch_size] X = np.empty(( self.batch_size, self.resized_height, self.resized_width, self.num_channels)) y = np.empty(( self.batch_size, self.num_classes)) for i, image_name in enumerate(current_batch): path = os.path.join(self.folder_imgs, image_name) img = cv2.resize(cv2.imread(path),(self.resized_height, self.resized_width)).astype(np.float32) if not self.augmentation is None: augmented = self.augmentation(image=img) img = augmented['image'] X[i, :, :, :] = img/255.0 if not self.is_test: y[i, :] = img_2_ohe_vector[image_name] return X, y def get_labels(self): if self.shuffle: images_current = self.images_list[:self.len*self.batch_size] labels = [img_2_ohe_vector[img] for img in images_current] else: labels = self.labels return np.array(labels )
Understanding Clouds from Satellite Images
5,715,541
for dataset in combine: dataset['Embarked'] = dataset['Embarked'].fillna(freq_port) train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean().sort_values(by='Survived', ascending=False )<data_type_conversions>
albumentations_train = Compose([ VerticalFlip() , HorizontalFlip() , Rotate(limit=30), GridDistortion() ], p=1 )
Understanding Clouds from Satellite Images
5,715,541
for dataset in combine: dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int) train_df.head()<data_type_conversions>
data_generator_train = DataGenenerator(train_imgs, augmentation=albumentations_train) data_generator_train_eval = DataGenenerator(train_imgs, shuffle=False) data_generator_val = DataGenenerator(val_imgs, shuffle=False )
Understanding Clouds from Satellite Images
5,715,541
test_df['Fare'].fillna(test_df['Fare'].dropna().median() , inplace=True) test_df.head()<sort_values>
class PrAucCallback(Callback): def __init__(self, data_generator, num_workers=num_cores, early_stopping_patience=5, plateau_patience=3, reduction_rate=0.5, stage='train', checkpoints_path='checkpoints/'): super(Callback, self ).__init__() self.data_generator = data_generator self.num_workers = num_workers self.class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] self.history = [[] for _ in range(len(self.class_names)+ 1)] self.early_stopping_patience = early_stopping_patience self.plateau_patience = plateau_patience self.reduction_rate = reduction_rate self.stage = stage self.best_pr_auc = -float('inf') if not os.path.exists(checkpoints_path): os.makedirs(checkpoints_path) self.checkpoints_path = checkpoints_path def compute_pr_auc(self, y_true, y_pred): pr_auc_mean = 0 print(f" {' ") for class_i in range(len(self.class_names)) : precision, recall, _ = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) pr_auc = auc(recall, precision) pr_auc_mean += pr_auc/len(self.class_names) print(f"PR AUC {self.class_names[class_i]}, {self.stage}: {pr_auc:.3f} ") self.history[class_i].append(pr_auc) print(f" {' PR AUC mean, {self.stage}: {pr_auc_mean:.3f} {' ") self.history[-1].append(pr_auc_mean) return pr_auc_mean def is_patience_lost(self, patience): if len(self.history[-1])> patience: best_performance = max(self.history[-1][-(patience + 1):-1]) return best_performance == self.history[-1][-(patience + 1)] and best_performance >= self.history[-1][-1] def early_stopping_check(self, pr_auc_mean): if self.is_patience_lost(self.early_stopping_patience): self.model.stop_training = True def model_checkpoint(self, pr_auc_mean, epoch): if pr_auc_mean > self.best_pr_auc: for checkpoint in glob.glob(os.path.join(self.checkpoints_path, 'classifier_densenet169_epoch_*')) : os.remove(checkpoint) self.best_pr_auc = pr_auc_mean self.model.save(os.path.join(self.checkpoints_path, f'classifier_densenet169_epoch_{epoch}_val_pr_auc_{pr_auc_mean}.h5')) print(f" {' Saved new checkpoint {' ") def reduce_lr_on_plateau(self): if self.is_patience_lost(self.plateau_patience): new_lr = float(keras.backend.get_value(self.model.optimizer.lr)) * self.reduction_rate keras.backend.set_value(self.model.optimizer.lr, new_lr) print(f" {' Reduced learning rate to {new_lr}. {' ") def on_epoch_end(self, epoch, logs={}): y_pred = self.model.predict_generator(self.data_generator, workers=self.num_workers) y_true = self.data_generator.get_labels() pr_auc_mean = self.compute_pr_auc(y_true, y_pred) if self.stage == 'val': self.early_stopping_check(pr_auc_mean) self.model_checkpoint(pr_auc_mean, epoch) self.reduce_lr_on_plateau() def get_pr_auc_history(self): return self.history
Understanding Clouds from Satellite Images
5,715,541
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4) train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False ).mean().sort_values(by='FareBand', ascending=True )<data_type_conversions>
train_metric_callback = PrAucCallback(data_generator_train_eval) val_callback = PrAucCallback(data_generator_val, stage='val' )
Understanding Clouds from Satellite Images
5,715,541
for dataset in combine: dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0 dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1 dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2 dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3 dataset['Fare'] = dataset['Fare'].astype(int) train_df = train_df.drop(['FareBand'], axis=1) combine = [train_df, test_df] train_df.head(10 )<prepare_x_and_y>
def get_model() : K.clear_session() base_model = DenseNet169(weights='imagenet', include_top=False, pooling='avg', input_shape=(224, 224, 3)) x = base_model.output y_pred = Dense(4, activation='sigmoid' )(x) return Model(inputs=base_model.input, outputs=y_pred) model = get_model()
Understanding Clouds from Satellite Images
5,715,541
X_train = train_df.drop("Survived", axis=1) Y_train = train_df["Survived"] X_test = test_df.drop("PassengerId", axis=1 ).copy() X_train.head(10 )<compute_train_metric>
for base_layer in model.layers[:-1]: base_layer.trainable = False model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy') history_0 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=1, callbacks=[train_metric_callback, val_callback], workers=num_cores, verbose=1 )
Understanding Clouds from Satellite Images
5,715,541
logreg = LogisticRegression() logreg.fit(X_train, Y_train) Y_pred = logreg.predict(X_test) acc_log = round(logreg.score(X_train, Y_train)* 100, 2) acc_log<compute_train_metric>
for base_layer in model.layers[:-1]: base_layer.trainable = True model.compile(optimizer=Adam(lr=1e-5), loss='binary_crossentropy') history_1 = model.fit_generator(generator=data_generator_train, validation_data=data_generator_val, epochs=2, callbacks=[train_metric_callback, val_callback], workers=num_cores, verbose=1, initial_epoch=1 )
Understanding Clouds from Satellite Images
5,715,541
svc = SVC() svc.fit(X_train, Y_train) Y_pred = svc.predict(X_test) acc_svc = round(svc.score(X_train, Y_train)* 100, 2) acc_svc<predict_on_test>
model = load_model('../input/clouds-classifier-files/classifier_densenet169_epoch_21_val_pr_auc_0.8365921057512743.h5')
Understanding Clouds from Satellite Images
5,715,541
knn = KNeighborsClassifier(n_neighbors = 3) knn.fit(X_train, Y_train) Y_pred = knn.predict(X_test) acc_knn = round(knn.score(X_train, Y_train)* 100, 2) acc_knn<predict_on_test>
Image(".. /input/clouds-classifier-files/loss_hist_densenet169.png" )
Understanding Clouds from Satellite Images
5,715,541
gaussian = GaussianNB() gaussian.fit(X_train, Y_train) Y_pred = gaussian.predict(X_test) acc_gaussian = round(gaussian.score(X_train, Y_train)* 100, 2) acc_gaussian<compute_train_metric>
Image(".. /input/clouds-classifier-files/pr_auc_hist_densenet169.png" )
Understanding Clouds from Satellite Images
5,715,541
perceptron = Perceptron() perceptron.fit(X_train, Y_train) Y_pred = perceptron.predict(X_test) acc_perceptron = round(perceptron.score(X_train, Y_train)* 100, 2) acc_perceptron<choose_model_class>
class_names = ['Fish', 'Flower', 'Sugar', 'Gravel'] def get_threshold_for_recall(y_true, y_pred, class_i, recall_threshold=0.95, precision_threshold=0.94, plot=False): precision, recall, thresholds = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i]) i = len(thresholds)- 1 best_recall_threshold = None while best_recall_threshold is None: next_threshold = thresholds[i] next_recall = recall[i] if next_recall >= recall_threshold: best_recall_threshold = next_threshold i -= 1 best_precision_threshold = [thres for prec, thres in zip(precision, thresholds)if prec >= precision_threshold][0] if plot: plt.figure(figsize=(10, 7)) plt.step(recall, precision, color='r', alpha=0.3, where='post') plt.fill_between(recall, precision, alpha=0.3, color='r') plt.axhline(y=precision[i + 1]) recall_for_prec_thres = [rec for rec, thres in zip(recall, thresholds) if thres == best_precision_threshold][0] plt.axvline(x=recall_for_prec_thres, color='g') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.legend(['PR curve', f'Precision {precision[i + 1]:.2f} corresponding to selected recall threshold', f'Recall {recall_for_prec_thres:.2f} corresponding to selected precision threshold']) plt.title(f'Precision-Recall curve for Class {class_names[class_i]}') return best_recall_threshold, best_precision_threshold y_pred = model.predict_generator(data_generator_val, workers=num_cores) y_true = data_generator_val.get_labels() recall_thresholds = dict() precision_thresholds = dict() for i, class_name in tqdm(enumerate(class_names)) : recall_thresholds[class_name], precision_thresholds[class_name] = get_threshold_for_recall(y_true, y_pred, i, plot=True )
Understanding Clouds from Satellite Images
5,715,541
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)
acc_decision_tree<compute_train_metric>
data_generator_test = DataGenenerator(folder_imgs=test_imgs_folder, shuffle=False)
y_pred_test = model.predict_generator(data_generator_test, workers=num_cores)
Understanding Clouds from Satellite Images
5,715,541
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest<train_model>
image_labels_empty = set()
for i, (img, predictions) in enumerate(zip(os.listdir(test_imgs_folder), y_pred_test)):
    for class_i, class_name in enumerate(class_names):
        if predictions[class_i] < recall_thresholds[class_name]:
            image_labels_empty.add(f'{img}_{class_name}')
Understanding Clouds from Satellite Images
5,715,541
grad_boost = GradientBoostingClassifier(n_estimators=100)
grad_boost.fit(X_train, Y_train)
Y_pred = grad_boost.predict(X_test)
grad_boost.score(X_train, Y_train)
acc_grad_boost = round(grad_boost.score(X_train, Y_train) * 100, 2)
acc_grad_boost<compute_train_metric>
submission = pd.read_csv('../input/efficient-net-b4-unet-clouds/submission.csv')
submission.head()
Understanding Clouds from Satellite Images
5,715,541
Ridge = RidgeClassifierCV()
Ridge.fit(X_train, Y_train)
Y_pred = Ridge.predict(X_test)
acc_Ridge = round(Ridge.score(X_train, Y_train) * 100, 2)
acc_Ridge<create_dataframe>
predictions_nonempty = set(submission.loc[~submission['EncodedPixels'].isnull(), 'Image_Label'].values)
Understanding Clouds from Satellite Images
5,715,541
models = pd.DataFrame({
    'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
              'Random Forest', 'Naive Bayes', 'Perceptron',
              'Grad boost', 'Decision Tree'],
    'Score': [acc_svc, acc_knn, acc_log,
              acc_random_forest, acc_gaussian, acc_perceptron,
              acc_grad_boost, acc_decision_tree]})
models.sort_values(by='Score', ascending=False)<split>
print(f'{len(image_labels_empty.intersection(predictions_nonempty))} masks would be removed')
Understanding Clouds from Satellite Images
5,715,541
<train_on_grid><EOS>
submission.loc[submission['Image_Label'].isin(image_labels_empty), 'EncodedPixels'] = np.nan
submission.to_csv('submission_segmentation_and_classifier.csv', index=None)
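A small hedged sanity check (my addition, not from the original kernel): re-read the saved file and count how many non-empty masks remain per class. The 'Image_Label' format 'image.jpg_ClassName' is assumed from the competition's sample submission.

sub_check = pd.read_csv('submission_segmentation_and_classifier.csv')
sub_check['Label'] = sub_check['Image_Label'].str.rsplit('_', n=1).str[-1]
print(sub_check.groupby('Label')['EncodedPixels'].count())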
Understanding Clouds from Satellite Images
6,632,007
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<train_model>
kernel_start = time.time()
LIMIT = 8.8
DO_TRAIN = True
DO_TEST = True
USE_TTA = True
RAND = 12345

!pip install tensorflow-gpu==1.14.0 --quiet
!pip install keras==2.2.4 --quiet
!pip install segmentation-models --quiet
Understanding Clouds from Satellite Images
6,632,007
best_clf.fit(X_dev, y_dev)
acc_eval = accuracy_score(y_eval, best_clf.predict(X_eval))
dict_clf[name] = {
    'best_par': best_params,
    'best_clf': best_clf,
    'best_score': best_score,
    'score_eval': acc_eval,
    'fit_time': t,
}
acc_eval<train_on_grid>
sub = pd.read_csv('../input/understanding_cloud_organization/sample_submission.csv')
sub['Image'] = sub['Image_Label'].map(lambda x: x.split('.')[0])
sub['Label'] = sub['Image_Label'].map(lambda x: x.split('_')[1])
sub['p'] = pd.read_csv('../input/cloud-classifiers/pred_cls.csv').p.values
sub['p'] += np.load('../input/cloud-classifiers/pred_cls0.npy').reshape(-1) * 0.5
sub['p'] += pd.read_csv('../input/cloud-classifiers/pred_cls3.csv').p.values * 3.0
sub['p'] += np.load('/kaggle/input/cloud-classifiers/pred_cls4b.npy') * 0.6
sub['p'] /= 5.1

train = pd.read_csv('../input/cloud-images-resized/train_384x576.csv')
train['Image'] = train['Image_Label'].map(lambda x: x.split('.')[0])
train['Label'] = train['Image_Label'].map(lambda x: x.split('_')[1])

train2 = pd.DataFrame({'Image': train['Image'][::4]})
train2['e1'] = train['EncodedPixels'][::4].values
train2['e2'] = train['EncodedPixels'][1::4].values
train2['e3'] = train['EncodedPixels'][2::4].values
train2['e4'] = train['EncodedPixels'][3::4].values
train2.set_index('Image', inplace=True, drop=True)
train2.fillna('', inplace=True)
train2.head()

train2[['d1', 'd2', 'd3', 'd4']] = (train2[['e1', 'e2', 'e3', 'e4']] != '').astype('int8')
for k in range(1, 5):
    train2['o' + str(k)] = 0
train2[['o1', 'o2', 'o3', 'o4']] = np.load('../input/cloud-classifiers/oof_cls.npy')
train2[['o1', 'o2', 'o3', 'o4']] += np.load('../input/cloud-classifiers/oof_cls0.npy') * 0.5
train2[['o1', 'o2', 'o3', 'o4']] += np.load('../input/cloud-classifiers/oof_cls3.npy') * 3.0
train2[['o1', 'o2', 'o3', 'o4']] += np.load('../input/cloud-classifiers/oof_cls4b.npy') * 0.6
train2[['o1', 'o2', 'o3', 'o4']] /= 5.1
train2.head()
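As a hedged aside (not part of the original pipeline), one quick way to check that the blended out-of-fold scores o1..o4 built above are informative is to score them against the mask-presence flags d1..d4:

from sklearn.metrics import roc_auc_score

for k in range(1, 5):
    auc = roc_auc_score(train2['d%d' % k], train2['o%d' % k])
    print('class %d blended OOF AUC = %.3f' % (k, auc))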
Understanding Clouds from Satellite Images
6,632,007
paramgrid = {
    'n_estimators': [100, 150, 200, 250, 300, 400, 500],
    'max_features': ['auto', 'log2'],
    'min_samples_leaf': list(range(2, 7)),
    'loss': ['deviance', 'exponential'],
    'learning_rate': [0.025, 0.05, 0.075, 0.1],
}
GS = GridSearchCV(GradientBoostingClassifier(random_state=77), paramgrid, cv=4)
t0 = time.time()
GS.fit(X_dev, y_dev)
t = time.time() - t0
best_clf = GS.best_estimator_
best_params = GS.best_params_
best_score = GS.best_score_
name = 'GB'
best_clf.fit(X_dev, y_dev)
acc_eval = accuracy_score(y_eval, best_clf.predict(X_eval))
dict_clf[name] = {
    'best_par': best_params,
    'best_clf': best_clf,
    'best_score': best_score,
    'score_eval': acc_eval,
    'fit_time': t,
}<train_model>
def mask2rleXXX(img0, shape=(576, 384), grow=(525, 350)):
    a = (shape[1] - img0.shape[0]) // 2
    b = (shape[0] - img0.shape[1]) // 2
    img = np.zeros((shape[1], shape[0]), dtype=np.uint8)
    img[a:a + img0.shape[0], b:b + img0.shape[1]] = img0
    img = cv2.resize(img, grow)
    pixels = img.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)

def rle2maskX(mask_rle, shape=(2100, 1400), shrink=1):
    s = mask_rle.split()
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
    starts -= 1
    ends = starts + lengths
    img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for lo, hi in zip(starts, ends):
        img[lo:hi] = 1
    return img.reshape(shape).T[::shrink, ::shrink]

def dice_coef(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
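A hedged round-trip check of the run-length helpers with a tiny made-up mask (my addition): encode it with the same column-major RLE convention and confirm rle2maskX recovers it. Note that shape is passed as (width, height).

toy = np.zeros((4, 6), dtype=np.uint8)      # 4 rows x 6 columns
toy[1:3, 2:4] = 1                           # a small 2x2 blob
pixels = np.concatenate([[0], toy.T.flatten(), [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
toy_rle = ' '.join(str(x) for x in runs)    # '10 2 14 2'
decoded = rle2maskX(toy_rle, shape=(6, 4), shrink=1)
assert (decoded == toy).all()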
Understanding Clouds from Satellite Images
6,632,007
for clf in dict_clf.keys():
    print("{0} classifier: \t- Best score = {1:.2%}".format(clf, dict_clf[clf]['best_score']))
    print("\t- Score on evaluation set = {0:.2%}".format(dict_clf[clf]['score_eval']))
    print("\t- Fitting time = {0:.1f} min".format(round(dict_clf[clf]['fit_time'] / 60, 1)))
    print("\t- Best parameters:")
    for par in sorted(dict_clf[clf]['best_par'].keys()):
        print("\t\t* {0}: {1}".format(par, dict_clf[clf]['best_par'][par]))<predict_on_test>
class DataGenerator2(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, batch_size=24, shuffle=False, width=544, height=352,
                 scale=1/128., sub=1., mode='train_seg',
                 path='../input/cloud-images-resized/train_images_384x576/',
                 flips=False, augment=False, shrink1=1, shrink2=1, dim=(576, 384), clean=False):
        'Initialization'
        self.list_IDs = list_IDs
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.path = path
        self.scale = scale
        self.sub = sub
        self.path = path
        self.width = width
        self.height = height
        self.mode = mode
        self.flips = flips
        self.augment = augment
        self.shrink1 = shrink1
        self.shrink2 = shrink2
        self.dim = dim
        self.clean = clean
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        ct = int(np.floor(len(self.list_IDs) / self.batch_size))
        if len(self.list_IDs) > ct * self.batch_size:
            ct += 1
        return int(ct)

    def __getitem__(self, index):
        'Generate one batch of data'
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        X, msk = self.__data_generation(indexes)
        if self.augment:
            X, msk = self.__augment_batch(X, msk)
        if (self.mode == 'train_seg') | (self.mode == 'validate_seg'):
            return X, msk
        else:
            return X

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(int(len(self.list_IDs)))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, indexes):
        'Generates data containing batch_size samples'
        lnn = len(indexes)
        ex = self.shrink1
        ax = self.shrink2
        X = np.empty((lnn, self.height, self.width, 3), dtype=np.float32)
        msk = np.empty((lnn, self.height//ax, self.width//ax, 4), dtype=np.int8)

        for k in range(lnn):
            img = cv2.imread(self.path + self.list_IDs[indexes[k]] + '.jpg')
            img = img[::ex, ::ex, :]
            hflip = False
            vflip = False
            if (self.flips):
                if np.random.uniform(0, 1) > 0.5: hflip = True
                if np.random.uniform(0, 1) > 0.5: vflip = True
            if vflip: img = cv2.flip(img, 0)
            if hflip: img = cv2.flip(img, 1)
            a = np.random.randint(0, self.dim[0]//ex//ax - self.width//ax + 1)
            b = np.random.randint(0, self.dim[1]//ex//ax - self.height//ax + 1)
            if (self.mode == 'predict'):
                a = (self.dim[0]//ex//ax - self.width//ax) // 2
                b = (self.dim[1]//ex//ax - self.height//ax) // 2
            img = img[b*ax:self.height + b*ax, a*ax:self.width + a*ax]
            X[k, ] = img*self.scale - self.sub
            if (self.mode != 'predict'):
                for j in range(1, 5):
                    rle = train2.loc[self.list_IDs[indexes[k]], 'e' + str(j)]
                    if self.clean:
                        if train2.loc[self.list_IDs[indexes[k]], 'o' + str(j)] < 0.4:
                            rle = ''
                    mask = rle2maskX(rle, shrink=ex*ax, shape=self.dim)
                    if vflip: mask = np.flip(mask, axis=0)
                    if hflip: mask = np.flip(mask, axis=1)
                    msk[k, :, :, j-1] = mask[b:self.height//ax + b, a:self.width//ax + a]

        return X, msk

    def __random_transform(self, img, masks):
        composition = albu.Compose([
            albu.ShiftScaleRotate(rotate_limit=30, scale_limit=0.1, p=0.5)
        ])
        composed = composition(image=img, mask=masks)
        aug_img = composed['image']
        aug_masks = composed['mask']
        return aug_img, aug_masks

    def __augment_batch(self, img_batch, masks_batch):
        for i in range(img_batch.shape[0]):
            img_batch[i, ], masks_batch[i, ] = self.__random_transform(
                img_batch[i, ], masks_batch[i, ])
        return img_batch, masks_batch
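A hedged usage sketch of the generator above; the 5,000/rest train-validation split is only illustrative and is not the kernel's actual fold logic.

idx = train2.index.values
train_gen = DataGenerator2(idx[:5000], flips=True, augment=True, shuffle=True)
valid_gen = DataGenerator2(idx[5000:], mode='validate_seg')
X_batch, y_batch = train_gen[0]
# X_batch: (24, 352, 544, 3) float images, y_batch: (24, 352, 544, 4) binary masks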
Understanding Clouds from Satellite Images
6,632,007
estimators = [('RF', dict_clf['RF']['best_clf']), ('GB', dict_clf['GB']['best_clf']), ('KNN', knn),('svc', svc),('trees', decision_tree)] voter = VotingClassifier(estimators=estimators, voting='hard') voter.fit(X_train, Y_train) Y_pred = voter.predict(X_test ).astype(int )<save_to_csv>
class AdamAccumulate(Optimizer):

    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None,
                 decay=0., amsgrad=False, accum_iters=8, **kwargs):
        if accum_iters < 1:
            raise ValueError('accum_iters must be >= 1')
        super(AdamAccumulate, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay
        self.amsgrad = amsgrad
        self.accum_iters = K.variable(accum_iters, K.dtype(self.iterations))
        self.accum_iters_float = K.cast(self.accum_iters, K.floatx())

    @interfaces.legacy_get_updates_support
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        completed_updates = K.cast(K.tf.floordiv(self.iterations, self.accum_iters), K.floatx())
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * completed_updates))

        t = completed_updates + 1
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t)))

        update_switch = K.equal((self.iterations + 1) % self.accum_iters, 0)
        update_switch = K.cast(update_switch, K.floatx())

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        gs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]

        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]

        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat, tg in zip(params, grads, ms, vs, vhats, gs):

            sum_grad = tg + g
            avg_grad = sum_grad / self.accum_iters_float

            m_t = (self.beta_1 * m) + (1. - self.beta_1) * avg_grad
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(avg_grad)

            if self.amsgrad:
                vhat_t = K.maximum(vhat, v_t)
                p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                self.updates.append(K.update(vhat, (1 - update_switch) * vhat + update_switch * vhat_t))
            else:
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, (1 - update_switch) * m + update_switch * m_t))
            self.updates.append(K.update(v, (1 - update_switch) * v + update_switch * v_t))
            self.updates.append(K.update(tg, (1 - update_switch) * sum_grad))
            new_p = p_t

            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, (1 - update_switch) * p + update_switch * new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon,
                  'amsgrad': self.amsgrad}
        base_config = super(AdamAccumulate, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
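A hedged example of how this accumulating optimizer would typically be plugged into a segmentation model (the model variable is a placeholder; dice_coef is the metric defined above): gradients are averaged over accum_iters mini-batches before a single Adam-style update is applied.

opt = AdamAccumulate(lr=2e-3, accum_iters=8)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=[dice_coef])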
Understanding Clouds from Satellite Images