kernel_id     int64    (values 24.2k – 23.3M)
prompt        string   (lengths 8 – 1.85M)
completion    string   (lengths 1 – 182k)
comp_name     string   (lengths 5 – 57)
14,556,937
sys.path = [
    '../input/smp20210127/pytorch-image-models-master/pytorch-image-models-master',
    '../input/hpapytorchzoozip/pytorch_zoo-master/',
    '../input/hpa-seg/HPA-Cell-Segmentation/hpacellseg',
    '../input/hpafinal',
] + sys.path
warnings.filterwarnings("ignore")<import_modules>
cnn_model = Sequential()
cnn_model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(28, 28, 1)))
cnn_model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu'))
cnn_model.add(MaxPool2D(pool_size=(2, 2)))
cnn_model.add(Dropout(0.25))
cnn_model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu'))
cnn_model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu'))
cnn_model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
cnn_model.add(Dropout(0.25))
cnn_model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
cnn_model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
cnn_model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
cnn_model.add(Dropout(0.25))
cnn_model.add(Flatten())
cnn_model.add(Dense(256, activation="relu"))
cnn_model.add(Dropout(0.5))
cnn_model.add(Dense(10, activation="softmax"))
print(cnn_model.summary())
Digit Recognizer
14,556,937
# (the start of this import was truncated in the source; these names come from skimage.morphology)
from skimage.morphology import remove_small_holes, remove_small_objects
device = torch.device('cuda')<define_variables>
cnn_model.compile(optimizer='adam', loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
14,556,937
seg_size = 512
seg_bs = 8388608 // seg_size ** 2  # keep the pixel count per batch constant across seg_size choices
seg_TTA = 8
small_th_dict = {
    2048: 500,
    1024: 125,
    512: 32,
}
small_th = small_th_dict[seg_size]
mask_dir = 'test_mask_npz_fullsize_cell_mask'
model_dirs = [
    '../input/bo-hpa-models',
    '../input/bo-hpa-models-3d256',
    '../input/hpa-models',
    '../input/hpa-models-qishen',
]
TTA = {
    'orig': 2,
    'masked': 3,
    'cells_128': 8,
    'cells_256': 6,
    'center_cells': 3,
}
n_ch = 4
num_classes = 19
image_size = 512
orig_mean = [239.93038613, 246.05603962, 250.16871503, 250.50623682]
data_dir = '../input/hpa-single-cell-image-classification/test/'
df_sub = pd.read_csv('../input/hpa-single-cell-image-classification/sample_submission.csv')
df_sub = df_sub.head(10) if df_sub.shape[0] == 559 else df_sub  # 559 rows = public test set; keep 10 for a quick run
df_sub.shape<normalization>
early_stopping = keras.callbacks.EarlyStopping(
    patience=5,
    min_delta=0.001,
    restore_best_weights=True,
)
Digit Recognizer
14,556,937
NORMALIZE = {"mean": [124 / 255, 117 / 255, 104 / 255], "std": [1 / (0.0167 * 255)] * 3}

def get_trans_seg(img, I, rev=False):
    # dihedral test-time transform: I selects one of 8 flip/transpose combinations
    if I >= 4 and not rev:
        img = img.transpose(2, 3)
    if I % 4 == 0:
        pass
    elif I % 4 == 1:
        img = img.flip(2)
    elif I % 4 == 2:
        img = img.flip(3)
    elif I % 4 == 3:
        img = img.flip(2).flip(3)
    if I >= 4 and rev:
        img = img.transpose(2, 3)
    return img

class CellSegmentator(object):
    def __init__(
        self,
        nuclei_model="../input/hpa-seg/dpn_unet_nuclei_v1.pth",
        cell_model="../input/hpa-seg/dpn_unet_cell_3ch_v1.pth",
        scale_factor=1.0,
        device="cuda",
        multi_channel_model=True,
    ):
        if device != "cuda" and device != "cpu" and "cuda" not in device:
            raise ValueError(f"{device} is not a valid device (cuda/cpu)")
        if device != "cpu":
            try:
                assert torch.cuda.is_available()
            except AssertionError:
                print("No GPU found, using CPU.", file=sys.stderr)
                device = "cpu"
        self.device = device
        if isinstance(nuclei_model, str):
            if not os.path.exists(nuclei_model):
                print(f"Could not find {nuclei_model}.", file=sys.stderr)
                raise FileNotFoundError(nuclei_model)
            nuclei_model = torch.load(nuclei_model, map_location=torch.device(self.device))
        if isinstance(nuclei_model, torch.nn.DataParallel) and device == "cpu":
            nuclei_model = nuclei_model.module
        self.nuclei_model = nuclei_model.to(self.device).eval()
        self.multi_channel_model = multi_channel_model
        if isinstance(cell_model, str):
            if not os.path.exists(cell_model):
                print(f"Could not find {cell_model}.", file=sys.stderr)
                raise FileNotFoundError(cell_model)
            cell_model = torch.load(cell_model, map_location=torch.device(self.device))
        self.cell_model = cell_model.to(self.device).eval()
        self.scale_factor = scale_factor

    def _image_conversion(self, images):
        microtubule_imgs, er_imgs, nuclei_imgs = images
        if self.multi_channel_model:
            if not isinstance(er_imgs, list):
                raise ValueError("Please specify the image path(s) for er channels!")
        else:
            if er_imgs is not None:
                raise ValueError("second channel should be None for two channel model prediction!")
        if not isinstance(microtubule_imgs, list):
            raise ValueError("The microtubule images should be a list")
        if not isinstance(nuclei_imgs, list):
            raise ValueError("The nuclei images should be a list")
        if er_imgs:
            if not len(microtubule_imgs) == len(er_imgs) == len(nuclei_imgs):
                raise ValueError("The lists of images need to be the same length")
        else:
            if not len(microtubule_imgs) == len(nuclei_imgs):
                raise ValueError("The lists of images need to be the same length")
        if not all(isinstance(item, np.ndarray) for item in microtubule_imgs):
            microtubule_imgs = [os.path.expanduser(item) for item in microtubule_imgs]
            nuclei_imgs = [os.path.expanduser(item) for item in nuclei_imgs]
            microtubule_imgs = list(map(lambda item: imageio.imread(item), microtubule_imgs))
            nuclei_imgs = list(map(lambda item: imageio.imread(item), nuclei_imgs))
            if er_imgs:
                er_imgs = [os.path.expanduser(item) for item in er_imgs]
                er_imgs = list(map(lambda item: imageio.imread(item), er_imgs))
        if not er_imgs:
            er_imgs = [np.zeros(item.shape, dtype=item.dtype) for item in microtubule_imgs]
        # stack microtubule/er/nuclei into the 3-channel layout the cell model expects
        cell_imgs = list(
            map(
                lambda item: np.dstack((item[0], item[1], item[2])),
                list(zip(microtubule_imgs, er_imgs, nuclei_imgs)),
            )
        )
        return cell_imgs

    def pred_nuclei(self, images):
        def _preprocess(image):
            self.target_shape = image.shape
            if len(image.shape) == 2:
                image = np.dstack((image, image, image))
            image = transform.rescale(image, self.scale_factor, multichannel=True)
            nuc_image = np.dstack((image[..., 2], image[..., 2], image[..., 2]))
            nuc_image = nuc_image.transpose([2, 0, 1])
            return nuc_image

        def _segment_helper(imgs):
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                # range(1): only the identity transform, i.e. no TTA for nuclei
                imgs = torch.stack([get_trans_seg(self.nuclei_model(get_trans_seg(imgs, I)), I, True).softmax(1) for I in range(1)], 0).mean(0)
                return imgs

        preprocessed_imgs = list(map(_preprocess, images))
        bs = 24
        predictions = []
        for i in range(0, len(preprocessed_imgs), bs):
            start = i
            end = min(len(preprocessed_imgs), i + bs)
            x = preprocessed_imgs[start:end]
            pred = _segment_helper(x).cpu().numpy()
            predictions.append(pred)
        predictions = list(np.concatenate(predictions, axis=0))
        predictions = map(util.img_as_ubyte, predictions)
        predictions = list(map(self._restore_scaling_padding, predictions))
        return predictions

    def _restore_scaling_padding(self, n_prediction):
        n_prediction = n_prediction.transpose([1, 2, 0])
        if not self.scale_factor == 1:
            n_prediction[..., 0] = 0
            n_prediction = cv2.resize(
                n_prediction,
                (self.target_shape[0], self.target_shape[1]),
                interpolation=cv2.INTER_AREA,
            )
        return n_prediction

    def pred_cells(self, images, precombined=False):
        def _preprocess(image):
            self.target_shape = image.shape
            if not len(image.shape) == 3:
                raise ValueError("image should have 3 channels")
            cell_image = transform.rescale(image, self.scale_factor, multichannel=True)
            cell_image = cell_image.transpose([2, 0, 1])
            return cell_image

        def _segment_helper(imgs):
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                # average the cell model over seg_TTA dihedral transforms
                imgs = torch.stack([get_trans_seg(self.cell_model(get_trans_seg(imgs, I)), I, True).softmax(1) for I in range(seg_TTA)], 0).mean(0)
                return imgs

        if not precombined:
            images = self._image_conversion(images)
        preprocessed_imgs = list(map(_preprocess, images))
        bs = 24
        predictions = []
        for i in range(0, len(preprocessed_imgs), bs):
            start = i
            end = min(len(preprocessed_imgs), i + bs)
            x = preprocessed_imgs[start:end]
            pred = _segment_helper(x).cpu().numpy()
            predictions.append(pred)
        predictions = list(np.concatenate(predictions, axis=0))
        predictions = map(self._restore_scaling_padding, predictions)
        predictions = list(map(util.img_as_ubyte, predictions))
        return predictions

def __fill_holes(image):
    boundaries = segmentation.find_boundaries(image)
    image = np.multiply(image, np.invert(boundaries))
    image = ndi.binary_fill_holes(image > 0)
    image = ndi.label(image)[0]
    return image

def label_cell(nuclei_pred, cell_pred):
    def __wsh(mask_img, threshold, border_img, seeds,
              threshold_adjustment=0.35, small_object_size_cutoff=10):
        img_copy = np.copy(mask_img)
        m = seeds * border_img
        img_copy[m <= threshold + threshold_adjustment] = 0
        img_copy[m > threshold + threshold_adjustment] = 1
        img_copy = img_copy.astype(bool)
        img_copy = remove_small_objects(img_copy, small_object_size_cutoff).astype(np.uint8)
        mask_img[mask_img <= threshold] = 0
        mask_img[mask_img > threshold] = 1
        mask_img = mask_img.astype(bool)
        mask_img = remove_small_holes(mask_img, 63)
        mask_img = remove_small_objects(mask_img, 1).astype(np.uint8)
        markers = ndi.label(img_copy, output=np.uint32)[0]
        labeled_array = segmentation.watershed(mask_img, markers, mask=mask_img, watershed_line=True)
        return labeled_array

    nuclei_label = __wsh(
        nuclei_pred[..., 2] / 255.0,
        0.4,
        1 - (nuclei_pred[..., 1] + cell_pred[..., 1]) / 255.0 > 0.05,
        nuclei_pred[..., 2] / 255,
        threshold_adjustment=-0.25,
        small_object_size_cutoff=small_th,
    )
    nuclei_label = remove_small_objects(nuclei_label, 157)
    nuclei_label = measure.label(nuclei_label)
    threshold_value = max(0.22, filters.threshold_otsu(cell_pred[..., 2] / 255) * 0.5)
    cell_region = np.multiply(
        cell_pred[..., 2] / 255 > threshold_value,
        np.invert(np.asarray(cell_pred[..., 1] / 255 > 0.05, dtype=np.int8)),
    )
    sk = np.asarray(cell_region, dtype=np.int8)
    distance = np.clip(cell_pred[..., 2], 255 * threshold_value, cell_pred[..., 2])
    cell_label = segmentation.watershed(-distance, nuclei_label, mask=sk)
    cell_label = remove_small_objects(cell_label, 344).astype(np.uint8)
    selem = disk(2)
    cell_label = closing(cell_label, selem)
    cell_label = __fill_holes(cell_label)
    sk = np.asarray(
        np.add(
            np.asarray(cell_label > 0, dtype=np.int8),
            np.asarray(cell_pred[..., 1] / 255 > 0.05, dtype=np.int8),
        ) > 0,
        dtype=np.int8,
    )
    cell_label = segmentation.watershed(-distance, cell_label, mask=sk)
    cell_label = __fill_holes(cell_label)
    cell_label = np.asarray(cell_label > 0, dtype=np.uint8)
    cell_label = measure.label(cell_label)
    cell_label = remove_small_objects(cell_label, 344)
    cell_label = measure.label(cell_label)
    cell_label = np.asarray(cell_label, dtype=np.uint16)
    nuclei_label = np.multiply(cell_label > 0, nuclei_label) > 0
    nuclei_label = measure.label(nuclei_label)
    nuclei_label = remove_small_objects(nuclei_label, 157)
    nuclei_label = np.multiply(cell_label, nuclei_label > 0)
    return nuclei_label, cell_label<prepare_x_and_y>
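A sketch of the intended call sequence for the segmentator and labeling helpers above; gray_image and rgb_image are hypothetical arrays shaped like the resized inputs built in the dataset cell further down:

seg = CellSegmentator()                          # loads the two dpn_unet .pth files listed above
nuc_preds = seg.pred_nuclei([gray_image])        # gray_image: HxW uint8 (blue/nuclei channel)
cell_preds = seg.pred_cells([rgb_image], precombined=True)   # rgb_image: HxWx3 (red/yellow/blue)
nuclei_label, cell_label = label_cell(nuc_preds[0], cell_preds[0])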
datagen = ImageDataGenerator(
    rotation_range=10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    zoom_range=0.1,
)
datagen.fit(X_train)
Digit Recognizer
14,556,937
class HPADatasetSeg(Dataset):
    def __init__(self, df, root='../input/hpa-single-cell-image-classification/test/'):
        self.df = df.reset_index(drop=True)
        self.root = root

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        row = self.df.loc[index]
        r = os.path.join(self.root, f'{row.ID}_red.png')
        y = os.path.join(self.root, f'{row.ID}_yellow.png')
        b = os.path.join(self.root, f'{row.ID}_blue.png')
        r = cv2.imread(r, 0)
        y = cv2.imread(y, 0)
        b = cv2.imread(b, 0)
        target_shape = r.shape
        gray_image = cv2.resize(b, (seg_size, seg_size))
        rgb_image = cv2.resize(np.stack((r, y, b), axis=2), (seg_size, seg_size))
        return gray_image, rgb_image, target_shape, row.ID

def collate_fn(batch):
    # items keep their native shapes, so a simple list-of-items collate is used
    gray = []
    rgb_image = []
    target_shape = []
    IDs = []
    for data_point in batch:
        gray.append(data_point[0])
        rgb_image.append(data_point[1])
        target_shape.append(data_point[2])
        IDs.append(data_point[3])
    return gray, rgb_image, target_shape, IDs

dataset_seg = HPADatasetSeg(df_sub)
loader_seg = DataLoader(dataset_seg, batch_size=seg_bs, num_workers=2, collate_fn=collate_fn)<categorify>
history = cnn_model.fit_generator(
    datagen.flow(X_train, y_train, batch_size=64),
    validation_data=(X_test, y_test),
    steps_per_epoch=X_train.shape[0] // 64,
    epochs=30,
    callbacks=[early_stopping],
)
Digit Recognizer
14,556,937
for gray, rgb, target_shapes, IDs in tqdm(loader_seg):
    nuc_segmentations = cellsegmentor.pred_nuclei(gray)
    cell_segmentations = cellsegmentor.pred_cells(rgb, precombined=True)
    for data_id, target_shape, nuc_seg, cell_seg in zip(IDs, target_shapes, nuc_segmentations, cell_segmentations):
        nuc, cell = label_cell(nuc_seg, cell_seg)
        np.savez_compressed(f'./{mask_dir}/{data_id}', cell.astype(np.uint8))
print('---- finish mask write ----')<set_options>
test_data = test_data / 255.0
test_data = test_data.values.reshape(-1, 28, 28, 1)
results = cnn_model.predict(test_data)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
Digit Recognizer
14,556,937
del cellsegmentor
gc.collect()
torch.cuda.empty_cache()<set_options>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)
Digit Recognizer
7,945,168
!nvidia-smi<categorify>
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow import keras
from keras.utils.np_utils import to_categorical
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Dropout, MaxPool2D
Digit Recognizer
7,945,168
def encode_binary_mask(mask: np.ndarray) -> t.Text:
    if mask.dtype != bool:
        raise ValueError(
            "encode_binary_mask expects a binary mask, received dtype == %s" % mask.dtype)
    mask = np.squeeze(mask)
    if len(mask.shape) != 2:
        raise ValueError(
            "encode_binary_mask expects a 2d mask, received shape == %s" % mask.shape)
    # COCO RLE-encode, zlib-compress, then base64 for the submission format
    mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1)
    mask_to_encode = mask_to_encode.astype(np.uint8)
    mask_to_encode = np.asfortranarray(mask_to_encode)
    encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]
    binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
    base64_str = base64.b64encode(binary_str)
    return base64_str.decode('ascii')

def binary_mask_to_ascii(mask, mask_val=1):
    mask = np.where(mask == mask_val, 1, 0).astype(bool)
    mask = np.squeeze(mask)
    if len(mask.shape) != 2:
        raise ValueError(f"encode_binary_mask expects a 2d mask, received shape == {mask.shape}")
    mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1)
    mask_to_encode = mask_to_encode.astype(np.uint8)
    mask_to_encode = np.asfortranarray(mask_to_encode)
    encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]
    binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
    base64_str = base64.b64encode(binary_str)
    return base64_str.decode()<load_pretrained>
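A minimal usage sketch of the encoder above, with a hypothetical toy mask (pycocotools, zlib, base64, and numpy imported as elsewhere in this kernel):

toy_mask = np.zeros((4, 4), dtype=bool)
toy_mask[1:3, 1:3] = True                        # a 2x2 blob of foreground pixels
print(encode_binary_mask(toy_mask))              # base64(zlib(COCO RLE)) string used in PredictionString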
righe, colonne = 28, 28
n_classi = 10
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv")
y_train = train["label"]
X_train = train.drop(labels=["label"], axis=1)
X_train = X_train / 255.0
test = test / 255.0
X_train = X_train.values.reshape(-1, righe, colonne, 1)
test = test.values.reshape(-1, righe, colonne, 1)
y_train = to_categorical(y_train, num_classes=n_classi)
plt.figure(figsize=(15, 4.5))
for i in range(30):
    plt.subplot(3, 10, i + 1)
    plt.imshow(X_train[i].reshape((righe, colonne)), cmap=plt.cm.binary)
    plt.axis('off')
plt.subplots_adjust(wspace=-0.1, hspace=-0.1)
plt.show()
Digit Recognizer
7,945,168
def read_img(image_id, color, train_or_test='test', image_size=None): filename = f'.. /input/hpa-single-cell-image-classification/{train_or_test}/{image_id}_{color}.png' img = cv2.imread(filename, 0) return img class HPADatasetTest(Dataset): def __init__(self, image_ids, mode='test'): self.image_ids = image_ids self.mode = mode def __len__(self): return len(self.image_ids) def __getitem__(self, index): try: image_id = self.image_ids[index] red = read_img(image_id, "red", self.mode, 0) green = read_img(image_id, "green", self.mode, 0) blue = read_img(image_id, "blue", self.mode, 0) yellow = read_img(image_id, "yellow", self.mode, 0) image = np.stack([blue, green, red, yellow], axis=-1) image_512 = cv2.resize(image,(512, 512)).transpose(2,0,1 ).astype(np.float32) image_768 = cv2.resize(image,(768, 768)).transpose(2,0,1 ).astype(np.float32) cell_mask = np.load(f'{mask_dir}/{image_id}.npz')['arr_0'] cell_mask = cv2.resize(cell_mask,(image.shape[0], image.shape[1]), interpolation=cv2.INTER_NEAREST) encs = '' masked_images = [] cells_128 = [] cells_256 = [] center_cells = [] for cell_id in range(1, np.max(cell_mask)+1): bmask =(cell_mask == cell_id ).astype(np.uint8) enc = encode_binary_mask(bmask==1) x, y, w, h = cv2.boundingRect(bmask) max_l = max(w, h) cx = x + w // 2 cy = y + h // 2 x1 = max(0, cx - max_l // 2) x1 = min(x1, image.shape[1] - max_l) y1 = max(0, cy - max_l // 2) y1 = min(y1, image.shape[0] - max_l) tmp = image.copy() tmp[bmask==0] = 0 cropped_cell_orig = tmp[y1:y1+max_l, x1:x1+max_l] cropped_cell_128 = cv2.resize(cropped_cell_orig,(128, 128)) cells_128.append(cropped_cell_128) cropped_cell_256 = cv2.resize(cropped_cell_orig,(256, 256)) cells_256.append(cropped_cell_256) masked = cv2.resize(tmp,(image_size, image_size)) masked_images.append(masked) cropped_cell = cv2.resize(tmp[y:y+h, x:x+w], (int(w / image.shape[1] * 768), int(h / image.shape[0] * 768)) ) final_size = 512 new_h, new_w, _ = cropped_cell.shape new_h = final_size if cropped_cell.shape[0] > final_size else new_h new_w = final_size if cropped_cell.shape[1] > final_size else new_w cropped_cell = cv2.resize(cropped_cell,(new_w, new_h)) center_cell = np.zeros(( final_size, final_size, 4)) center = final_size // 2 h_start = max(0,center-cropped_cell.shape[0]//2) h_end = min(final_size,h_start+cropped_cell.shape[0]) w_start = max(0,center-cropped_cell.shape[1]//2) w_end = min(final_size,w_start+cropped_cell.shape[1]) center_cell[h_start:h_end,w_start:w_end,:] = cropped_cell center_cells.append(center_cell) if encs == '': encs += enc else: encs = encs + ' ' + enc if len(masked_images)> 0: masked_images = np.stack(masked_images ).transpose(0, 3, 1, 2 ).astype(np.float32) cells_128 = np.stack(cells_128 ).transpose(0, 3, 1, 2 ).astype(np.float32) cells_256 = np.stack(cells_256 ).transpose(0, 3, 1, 2 ).astype(np.float32) center_cells = np.stack(center_cells ).transpose(0, 3, 1, 2 ).astype(np.float32) else: masked_images = np.zeros(( 4, 4, image_size, image_size)) cells_128 = np.zeros(( 4, 4, 128, 128)) cells_256 = np.zeros(( 4, 4, 256, 256)) for ch in range(4): image_512[ch] /= orig_mean[ch] image_768[ch] /= orig_mean[ch] masked_images[:, ch] /= orig_mean[ch] cells_128[:, ch] /= orig_mean[ch] cells_256[:, ch] /= orig_mean[ch] center_cells[:, ch] /= orig_mean[ch] except: image_id = '' encs = '' image_512 = np.zeros(( 4, 512, 512)) image_768 = np.zeros(( 4, 768, 768)) masked_images = np.zeros(( 5, 4, image_size, image_size)) cells_128 = np.zeros(( 5, 4, 128, 128)) cells_256 = np.zeros(( 5, 4, 256, 256)) center_cells = 
np.zeros(( 5, 4, 512, 512)) return image_id, encs, { '512': torch.tensor(image_512), '768': torch.tensor(image_768), 'masked': torch.tensor(masked_images), 'cells_128': torch.tensor(cells_128), 'cells_256': torch.tensor(cells_256), 'center_cells': torch.tensor(center_cells) } <create_dataframe>
datagen = ImageDataGenerator(
    rotation_range=10,
    zoom_range=0.10,
    width_shift_range=0.1,
    height_shift_range=0.1,
)
Digit Recognizer
7,945,168
dataset = HPADatasetTest(df_sub.ID.values, mode='test')
dataloader = DataLoader(dataset, batch_size=1, num_workers=2)<choose_model_class>
nets = 7
model = [0] * nets
for i in range(nets):
    model[i] = Sequential()
    model[i].add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu', input_shape=(righe, colonne, 1)))
    model[i].add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu'))
    model[i].add(MaxPool2D(pool_size=(2, 2)))
    model[i].add(Dropout(0.25))
    model[i].add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
    model[i].add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
    model[i].add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model[i].add(Dropout(0.25))
    model[i].add(Flatten())
    model[i].add(Dense(256, activation="relu"))
    model[i].add(Dropout(0.5))
    model[i].add(Dense(n_classi, activation="softmax"))
    model[i].compile(loss="categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
Digit Recognizer
7,945,168
class enetv2(nn.Module):
    def __init__(self, enet_type, out_dim=num_classes):
        super(enetv2, self).__init__()
        self.enet = timm.create_model(enet_type, False)
        # repeat the 3-channel stem weights to cover all n_ch input channels,
        # then swap the classifier head for a fresh linear layer
        if ('efficientnet' in enet_type) or ('mixnet' in enet_type):
            self.enet.conv_stem.weight = nn.Parameter(self.enet.conv_stem.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.classifier.in_features, out_dim)
            self.enet.classifier = nn.Identity()
        elif ('resnet' in enet_type or 'resnest' in enet_type) and 'vit' not in enet_type:
            self.enet.conv1[0].weight = nn.Parameter(self.enet.conv1[0].weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.fc.in_features, out_dim)
            self.enet.fc = nn.Identity()
        elif 'rexnet' in enet_type or 'regnety' in enet_type or 'nf_regnet' in enet_type:
            self.enet.stem.conv.weight = nn.Parameter(self.enet.stem.conv.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.head.fc.in_features, out_dim)
            self.enet.head.fc = nn.Identity()
        elif 'resnext' in enet_type:
            self.enet.conv1.weight = nn.Parameter(self.enet.conv1.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.fc.in_features, out_dim)
            self.enet.fc = nn.Identity()
        elif 'hrnet_w32' in enet_type:
            self.enet.conv1.weight = nn.Parameter(self.enet.conv1.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.classifier.in_features, out_dim)
            self.enet.classifier = nn.Identity()
        elif 'densenet' in enet_type:
            self.enet.features.conv0.weight = nn.Parameter(self.enet.features.conv0.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.classifier.in_features, out_dim)
            self.enet.classifier = nn.Identity()
        elif 'ese_vovnet39b' in enet_type or 'xception41' in enet_type:
            self.enet.stem[0].conv.weight = nn.Parameter(self.enet.stem[0].conv.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.head.fc.in_features, out_dim)
            self.enet.head.fc = nn.Identity()
        elif 'dpn' in enet_type:
            self.enet.features.conv1_1.conv.weight = nn.Parameter(self.enet.features.conv1_1.conv.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.classifier.in_channels, out_dim)
            self.enet.classifier = nn.Identity()
        elif 'inception' in enet_type:
            self.enet.features[0].conv.weight = nn.Parameter(self.enet.features[0].conv.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.last_linear.in_features, out_dim)
            self.enet.last_linear = nn.Identity()
        elif 'vit_base_resnet50' in enet_type:
            self.enet.patch_embed.backbone.stem.conv.weight = nn.Parameter(self.enet.patch_embed.backbone.stem.conv.weight.repeat(1, n_ch // 3 + 1, 1, 1)[:, :n_ch])
            self.myfc = nn.Linear(self.enet.head.in_features, out_dim)
            self.enet.head = nn.Identity()
        else:
            raise ValueError(f'unsupported backbone: {enet_type}')

    def forward(self, x):
        x = self.enet(x)
        h = self.myfc(x)
        return h<define_variables>
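A quick wiring check for the wrapper above (a sketch; requires timm, uses random weights and a small dummy input):

m = enetv2('resnet50d').eval()
with torch.no_grad():
    logits = m(torch.zeros(1, n_ch, 64, 64))     # n_ch=4 as configured earlier
print(logits.shape)                              # expected: torch.Size([1, 19])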
learning_rate = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
batch = 64
epochs = 5
H = [0] * nets
for j in range(nets):
    X_train2, X_val2, y_train2, y_val2 = train_test_split(X_train, y_train, test_size=0.1)
    H[j] = model[j].fit_generator(datagen.flow(X_train2, y_train2, batch_size=batch),
                                  epochs=epochs,
                                  validation_data=(X_val2, y_val2),
                                  verbose=1,
                                  steps_per_epoch=X_train2.shape[0] // batch,
                                  callbacks=[learning_rate])
Digit Recognizer
7,945,168
kernel_types = { 'resnet50d_512_multilabel_8flips_ss22rot45_co2_lr1e4_bs32_focal_ext_15epo': { 'model_class': 'enetv2', 'folds': [1], 'enet_type': 'resnet50d', 'input_type': ['512', 'masked'], }, 'rex150_512_multilabel_8flips_ss22rot45_co7_lr3e4_bs32_ext_cellpseudo2full_15epo': { 'model_class': 'enetv2', 'folds': [0], 'enet_type': 'rexnet_150', 'input_type': ['512', 'masked'], }, 'densenet121_512_multilabel_8flips_ss22rot45_co2_lr1e4_bs32_focal_ext_15epo': { 'model_class': 'enetv2', 'folds': [2], 'enet_type': 'densenet121', 'input_type': ['512', 'masked'], }, 'b0_512_multilabel_8flips_ss22rot45_co7_lr1e4_bs32_focal_ext_15epo': { 'model_class': 'enetv2', 'folds': [3], 'enet_type': 'tf_efficientnet_b0_ns', 'input_type': ['512', 'masked'], }, 'resnet101d_512_multilabel_8flips_ss22rot45_co7_lr1e4_bs32_focal_ext_15epo': { 'model_class': 'enetv2', 'folds': [4], 'enet_type': 'resnet101d', 'input_type': ['512', 'masked'], }, 'dpn68b_512_multilabel_8flips_ss22rot45_co7_lr1e4_bs32_focal_ext_15epo': { 'model_class': 'enetv2', 'folds': [0], 'enet_type': 'dpn68b', 'input_type': ['512', 'masked'], }, 'densenet169_512_multilabel_8flips_ss22rot45_co2_lr1e4_bs32_focal_ext_15epo': { 'model_class': 'enetv2', 'folds': [1], 'enet_type': 'densenet169', 'input_type': ['512', 'masked'], }, 'b0_3d128_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [2], 'enet_type': 'tf_efficientnet_b0_ns', 'input_type': ['cells_128'], }, 'resnet50d_3d128_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_15epo': { 'model_class': 'enetv2', 'folds': [0], 'enet_type': 'resnet50d', 'input_type': ['cells_128'], }, 'mixnet_m_3d128_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_15epo': { 'model_class': 'enetv2', 'folds': [0], 'enet_type': 'mixnet_m', 'input_type': ['cells_128'], }, 'densenet121_3d128_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [3], 'enet_type': 'densenet121', 'input_type': ['cells_128'], }, 'b0_3d256_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [0], 'enet_type': 'tf_efficientnet_b0_ns', 'input_type': ['cells_256'], }, 'b1_3d256_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [3], 'enet_type': 'tf_efficientnet_b1_ns', 'input_type': ['cells_256'], }, 'densenet121_3d256_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [2], 'enet_type': 'densenet121', 'input_type': ['cells_256'], }, 'dpn68b_3d256_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [4], 'enet_type': 'dpn68b', 'input_type': ['cells_256'], }, 'mixnet_m_3d256_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [2], 'enet_type': 'mixnet_m', 'input_type': ['cells_256'], }, 'resnet50d_3d256_multilabel_lw41_8flips_ss22rot45_lr1e4_bs32cell16_ext_2019_15epo': { 'model_class': 'enetv2', 'folds': [1], 'enet_type': 'resnet50d', 'input_type': ['cells_256'], }, }<load_pretrained>
results = np.zeros((test.shape[0], 10))
for j in range(nets):
    results = results + model[j].predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
sub = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
sub.to_csv("submission.csv", index=False)
Digit Recognizer
7,945,168
def load_state_dict(model, model_file):
    for folder in model_dirs:
        model_path = os.path.join(folder, model_file)
        if os.path.exists(model_path):
            state_dict = torch.load(model_path)
            # strip the 'module.' prefix left over from DataParallel checkpoints
            state_dict = {k[7:] if k.startswith('module.') else k: state_dict[k] for k in state_dict.keys()}
            model.load_state_dict(state_dict, strict=True)
            model.eval()
            return model
    raise FileNotFoundError(model_file)

models = []
input_types = []
for key in kernel_types.keys():
    for fold in kernel_types[key]['folds']:
        model = eval(kernel_types[key]['model_class'])(
            kernel_types[key]['enet_type'],
        )
        model = model.to(device)
        model_file = f'{key}_final_fold{fold}.pth'
        print(f'loading {model_file}...')
        model = load_state_dict(model, model_file)
        models.append(model)
        input_types.append(kernel_types[key]['input_type'])
n_models = len(models)
print('done!')
print('model count:', n_models)<load_pretrained>
Digit Recognizer
13,825,967
def load_model(model_name, path):
    if model_name == 'densenet121':
        state_dict = torch.load(path, torch.device('cuda'))
        model = class_densenet121_dropout(num_classes=19, in_channels=4, pretrained_file=None)
        model.cuda()
        model.load_state_dict(state_dict)
        model.eval()
        return model<define_variables>
Data = pd.read_csv('../input/digit-recognizer/train.csv')
Digit Recognizer
13,825,967
folds = [0, 1, 2, 3, 4]
model_dic = {'densenet121': '../input/hpafinal/output/run_nn_20210504_000509/'}<load_pretrained>
Y = np.array(Data['label'])
X = np.array(Data.drop('label', axis=1)) / 255.
Digit Recognizer
13,825,967
rgby_models = []
for model_name in model_dic:
    path = model_dic[model_name]
    for fold in folds:
        if os.path.exists(path + 'fold%s.ckpt' % fold):
            model = load_model(model_name, path + 'fold%s.ckpt' % fold)
            rgby_models.append(model)
print('daishu model count:', len(rgby_models))<categorify>
plt.imshow(X[25].reshape(28, 28))
print(Y[25])
Digit Recognizer
13,825,967
def get_trans(img, I, mode='bgry'):
    # I in [0..7] indexes the 8 dihedral symmetries: 4 flip states x optional transpose
    if mode == 'rgby':
        img = img[:, [2, 1, 0, 3]]
    if I >= 4:
        img = img.transpose(2, 3)
    if I % 4 == 0:
        return img
    elif I % 4 == 1:
        return img.flip(2)
    elif I % 4 == 2:
        return img.flip(3)
    elif I % 4 == 3:
        return img.flip(2).flip(3)

def get_trans_daishu(img, I, mode='bgry'):
    # crop-based TTA: seven 640x640 crops/flips of a 768x768 input, resized back to 512
    if mode == 'rgby':
        img = img[:, [2, 1, 0, 3]]
    if I == 0:
        img = img[:, :, 64:704, 64:704]
    if I == 1:
        img = img[:, :, :640, :640].flip(2)
    if I == 2:
        img = img[:, :, :640, 128:].flip(3)
    if I == 3:
        img = img[:, :, 128:, 128:].flip(2).flip(3)
    if I == 4:
        img = img[:, :, 128:, :640].transpose(2, 3)
    if I == 5:
        img = img[:, :, 32:672, 96:736].transpose(2, 3).flip(2)
    if I == 6:
        img = img[:, :, 96:736, 32:672].transpose(2, 3).flip(3)
    img = F.interpolate(img, size=[512, 512], mode="bilinear")
    return img<normalization>
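A small sketch of the usual TTA-averaging pattern built on get_trans, with a dummy batch (model is a stand-in name for any of the classifiers loaded above):

x = torch.randn(2, 4, 512, 512)                  # dummy NCHW batch
assert all(get_trans(x, I).shape == x.shape for I in range(8))   # every transform preserves shape
# typical use: preds = torch.stack([model(get_trans(x, I)).sigmoid() for I in range(8)]).mean(0)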
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
Digit Recognizer
13,825,967
IDs = []
encs = []
PRED_FINAL = []
little_bs = 16
with torch.no_grad():
    for ID, enc, images in tqdm(dataloader):
        try:
            if len(enc[0]) > 0:
                with amp.autocast():
                    for k in images.keys():
                        images[k] = images[k].cuda()
                        if images[k].ndim == 5:
                            images[k] = images[k].squeeze(0)
                    preds = {
                        'orig': [],
                        'cells': [],
                    }
                    for m, inp_types in zip(models, input_types):
                        for t in inp_types:
                            if t in ['masked', 'cells_128', 'cells_256']:
                                for I in np.random.choice(range(8), TTA[t], replace=False):
                                    this_pred = torch.cat([
                                        m(get_trans(images[t][b:b + little_bs], I)).sigmoid()
                                        for b in range(0, images[t].shape[0], little_bs)
                                    ])
                                    preds['cells'].append(this_pred)
                    for m in rgby_models:
                        for I in np.random.choice(range(8), TTA['center_cells'], replace=False):
                            this_pred = torch.cat([
                                m(get_trans(images['center_cells'][b:b + little_bs], I, 'rgby'))[1].sigmoid()
                                for b in range(0, images['center_cells'].shape[0], little_bs)
                            ])
                            preds['cells'].append(this_pred)
                    for k in preds.keys():
                        if len(preds[k]) > 0:
                            preds[k] = torch.stack(preds[k], 0).mean(0)
                        else:
                            preds[k] = 0
                    pred_final = preds['cells']
                PRED_FINAL.append(pred_final.cpu())
                IDs += [ID[0]] * images['cells_128'].shape[0]
                encs += enc[0].split(' ')
        except Exception as e:
            print('error:', e)
PRED_FINAL = torch.cat(PRED_FINAL).float()<categorify>
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Conv2D, MaxPool2D
Digit Recognizer
13,825,967
PredictionString = []
for i in tqdm(range(PRED_FINAL.shape[0])):
    enc = encs[i]
    prob = PRED_FINAL[i]
    sub_string = []
    for cid, p in enumerate(prob):
        sub_string.append(' '.join([str(cid), f'{p:.5f}', enc]))
    sub_string = ' '.join(sub_string)
    PredictionString.append(sub_string)<create_dataframe>
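The competition's PredictionString is a flat sequence of '<class_id> <confidence> <encoded_mask>' triplets; a toy illustration of the format built above, with hypothetical values:

enc = '<base64-rle>'                              # placeholder; real values come from encode_binary_mask
probs = [0.91, 0.07]
print(' '.join(f'{cid} {p:.5f} {enc}' for cid, p in enumerate(probs)))
# -> 0 0.91000 <base64-rle> 1 0.07000 <base64-rle>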
print(tf.config.list_physical_devices('GPU'), '//', tf.test.is_built_with_cuda())
Digit Recognizer
13,825,967
df_pred = pd.DataFrame({
    'ID': IDs,
    'PredictionString': PredictionString
})
df_pred = df_pred.groupby(['ID'])['PredictionString'].apply(lambda x: ' '.join(x)).reset_index()<save_to_csv>
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=12,
    width_shift_range=0.12,
    height_shift_range=0.12,
    shear_range=0.12,
    validation_split=0.2,
)
Digit Recognizer
13,825,967
df_sub = df_sub[['ID', 'ImageWidth', 'ImageHeight']].merge(df_pred, on='ID', how="left")
df_sub.fillna('', inplace=True)
df_sub.to_csv('submission.csv', index=False)<install_modules>
training_generator = datagen.flow(X_train, y_train, batch_size=32, subset='training')
validation_generator = datagen.flow(X_train, y_train, batch_size=32, subset='validation')
Digit Recognizer
13,825,967
!pip install -q "../input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl"
!pip install -q "../input/hpapytorchzoozip/pytorch_zoo-master"
!pip install -q "../input/hpacellsegmentatormaster/HPA-Cell-Segmentation-master"
NUC_MODEL = '../input/hpacellsegmentatormodelweights/dpn_unet_nuclei_v1.pth'
CELL_MODEL = '../input/hpacellsegmentatormodelweights/dpn_unet_cell_3ch_v1.pth'<choose_model_class>
model = Sequential()
Digit Recognizer
13,825,967
segmentator = cellseg.CellSegmentator(
    NUC_MODEL,
    CELL_MODEL,
    scale_factor=0.25,
    padding=True,
    multi_channel_model=True,
)<set_options>
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(256, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
Digit Recognizer
13,825,967
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        print(e)<load_pretrained>
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer=tf.keras.optimizers.RMSprop(lr=0.003, rho=0.9, epsilon=1e-08, decay=0.0),
    metrics=["accuracy"],
)
Digit Recognizer
13,825,967
RGB_model = keras.models.load_model('../input/hpa-models-2021/ProteinModelRGB_rev_18.h5')
G_model = keras.models.load_model('../input/hpa-models-2021/GreentileProteinModel_rev_2.h5')
multicellmodel = keras.models.load_model('../input/hpa-models-2021/Full_image_greenModelRev9.h5',
                                         custom_objects={'FixedDropout': FixedDropout(rate=0.5)})
img_type = 'g'
multicell2model = keras.models.load_model('../input/hpa-models-2021/Full_image_RYB_GModelRev11.h5',
                                          custom_objects={'FixedDropout': FixedDropout(rate=0.5)})
img2_type = 'ryb_g'<categorify>
cb = tf.keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                                               patience=3,
                                                               verbose=1,
                                                               factor=0.5,
                                                               min_lr=0.00001)
Digit Recognizer
13,825,967
remove_small_holes, remove_small_objects) def label_cell(nuclei_pred, cell_pred): def __wsh( mask_img, threshold, border_img, seeds, threshold_adjustment=0.35, small_object_size_cutoff=10, ): img_copy = np.copy(mask_img) m = seeds * border_img img_copy[m <= threshold + threshold_adjustment] = 0 img_copy[m > threshold + threshold_adjustment] = 1 img_copy = img_copy.astype(np.bool) img_copy = remove_small_objects(img_copy, small_object_size_cutoff ).astype( np.uint8 ) mask_img[mask_img <= threshold] = 0 mask_img[mask_img > threshold] = 1 mask_img = mask_img.astype(np.bool) mask_img = remove_small_holes(mask_img, 63) mask_img = remove_small_objects(mask_img, 1 ).astype(np.uint8) markers = ndi.label(img_copy, output=np.uint32)[0] labeled_array = segmentation.watershed( mask_img, markers, mask=mask_img, watershed_line=True ) return labeled_array nuclei_label = __wsh( nuclei_pred[..., 2] / 255.0, 0.4, 1 -(nuclei_pred[..., 1] + cell_pred[..., 1])/ 255.0 > 0.05, nuclei_pred[..., 2] / 255, threshold_adjustment=-0.25, small_object_size_cutoff=32, ) nuclei_label = remove_small_objects(nuclei_label, 157) nuclei_label = measure.label(nuclei_label) threshold_value = max(0.22, filters.threshold_otsu(cell_pred[..., 2] / 255)* 0.5) cell_region = np.multiply( cell_pred[..., 2] / 255 > threshold_value, np.invert(np.asarray(cell_pred[..., 1] / 255 > 0.05, dtype=np.int8)) , ) sk = np.asarray(cell_region, dtype=np.int8) distance = np.clip(cell_pred[..., 2], 255 * threshold_value, cell_pred[..., 2]) cell_label = segmentation.watershed(-distance, nuclei_label, mask=sk) cell_label = remove_small_objects(cell_label, 344 ).astype(np.uint8) selem = disk(2) cell_label = closing(cell_label, selem) cell_label = __fill_holes(cell_label) sk = np.asarray( np.add( np.asarray(cell_label > 0, dtype=np.int8), np.asarray(cell_pred[..., 1] / 255 > 0.05, dtype=np.int8), ) > 0, dtype=np.int8, ) cell_label = segmentation.watershed(-distance, cell_label, mask=sk) cell_label = __fill_holes(cell_label) cell_label = np.asarray(cell_label > 0, dtype=np.uint8) cell_label = measure.label(cell_label) cell_label = remove_small_objects(cell_label, 344) cell_label = measure.label(cell_label) cell_label = np.asarray(cell_label, dtype=np.uint16) return nuclei_label, cell_label def __fill_holes(image): boundaries = segmentation.find_boundaries(image) image = np.multiply(image, np.invert(boundaries)) image = ndi.binary_fill_holes(image > 0) image = ndi.label(image)[0] return image<define_variables>
model.fit(training_generator, epochs=100, callbacks=[cb, learning_rate_reduction], validation_data=validation_generator)
Digit Recognizer
13,825,967
def build_image_names(image_id: str)-> list: mt = f'/kaggle/input/hpa-single-cell-image-classification/test/{image_id}_red.png' er = f'/kaggle/input/hpa-single-cell-image-classification/test/{image_id}_yellow.png' nu = f'/kaggle/input/hpa-single-cell-image-classification/test/{image_id}_blue.png' high = f'/kaggle/input/hpa-single-cell-image-classification/test/{image_id}_green.png' return [mt], [er], [nu], [high], [[mt], [er], [nu]], def grab_contours(cnts): if len(cnts)== 2: cnts = cnts[0] elif len(cnts)== 3: cnts = cnts[1] else: raise Exception(( "Contours tuple must have length 2 or 3, " "otherwise OpenCV changed their cv2.findContours return " "signature yet again.Refer to OpenCV's documentation " "in that case")) return cnts def create_cell_images(RGB, RYB_G, G, cell_masks, size): def clipimgtosquare(group_img): cnt_img = np.zeros_like(group_img) cnt_img[cover == 255] = group_img[cover == 255] cnt_img = cnt_img[y:y+h, x:x+w] old_size = cnt_img.shape[:2] ratio = float(size)/max(old_size) new_size = tuple([int(x*ratio)for x in old_size]) resized = cv2.resize(cnt_img,(new_size[1], new_size[0])) delta_w = size - new_size[1] delta_h = size - new_size[0] top, bottom = delta_h//2, delta_h-(delta_h//2) left, right = delta_w//2, delta_w-(delta_w//2) color = [0, 0, 0] square = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) square = img_to_array(square) return square mask = cv2.convertScaleAbs(cell_masks) cnts = grab_contours(cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)) RGBs = [] RYB_Gs = [] Gs = [] for i in range(1,cell_masks.max()): mask = cv2.convertScaleAbs(np.where(cell_masks==i, 1, 0)) c = grab_contours(cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)) (x,y,w,h)= cv2.boundingRect(c[0]) cover = np.zeros_like(RGB) cv2.drawContours(cover, [c[0]], 0,(255,255,255), -1) rgb = clipimgtosquare(RGB) ryb_g = clipimgtosquare(RYB_G) g = clipimgtosquare(G) RGBs.append(rgb) RYB_Gs.append(ryb_g) Gs.append(g) return RGBs, RYB_Gs, Gs def image_predictions(images, model, TTArepeat=0, batch_size=8): labels = [] confidences = [] images = np.vstack(images) confidence = model.predict(images,batch_size=batch_size) if TTArepeat > 0: TTApred = [] TTApred.append(confidence) image = data_augmentation(images) for i in range(TTArepeat): image = data_augmentation(image) TTApred.append(model.predict(image)) confidence = np.mean(TTApred,axis=0) confidences.append(confidence) return confidences def create_cell_masks(mask): if mask.dtype != np.bool: raise ValueError( "encode_binary_mask expects a binary mask, received dtype == %s" % mask.dtype) mask = np.squeeze(mask) if len(mask.shape)!= 2: raise ValueError( "encode_binary_mask expects a 2d mask, received shape == %s" % mask.shape) mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1) mask_to_encode = mask_to_encode.astype(np.uint8) mask_to_encode = np.asfortranarray(mask_to_encode) encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"] binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION) base64_str = base64.b64encode(binary_str) return base64_str def get_image_masks(image_id): mt, er, nu, high, images = build_image_names(image_id=image_id) nuc_segmentations = segmentator.pred_nuclei(images[2]) cell_segmentations = segmentator.pred_cells(images) nuclei_mask, cell_mask = label_cell(nuc_segmentations[0], cell_segmentations[0]) blue = normalization(plt.imread(nu[0])) green = normalization(plt.imread(high[0])) red = normalization(plt.imread(mt[0])) yellow = 
normalization(plt.imread(er[0])) RGB = np.dstack(( red, green, blue)) RYB_G =img = np.dstack(( red+green, yellow+green, blue+green)) G = np.stack(( green,)*3, axis=-1) return RGB, RYB_G, G, cell_mask def flatten_list_of_lists(l_o_l, to_string=False): if not to_string: return [item for sublist in l_o_l for item in sublist] else: return [str(item)for sublist in l_o_l for item in sublist] def image_prediction_string(confidences, masks, labelqty = 19, threshold =.00): labels = [] probs = [] codes = [] predictionstring = [] for pred, mask in zip(confidences, masks): neglabel = 1-pred.max() for label in range(0,labelqty): if pred[label]>threshold: labels.append(label) probs.append(pred[label]) codes.append(mask.decode('UTF-8')) labels.append(labelqty) probs.append(neglabel) codes.append(mask.decode('UTF-8')) predictionstring = [" ".join(flatten_list_of_lists(zip([label, pred, mask]), to_string=True)) for label, pred, mask in zip(labels, probs, codes)] return(" ".join(predictionstring)) def data_aug_exp(img, modeltype, size): images = [] images.append(img) image = tf.image.rot90(img, k=1) images.append(image) image = tf.image.central_crop(image, central_fraction=.6) image = tf.image.resize(image, [size,size]) images.append(image) image = tf.image.flip_left_right(img) images.append(image) image = tf.image.central_crop(image, central_fraction=.6) image = tf.image.resize(image, [size,size]) images.append(image) image = tf.image.flip_up_down(img) images.append(image) image = tf.image.central_crop(image, central_fraction=.6) image = tf.image.resize(image, [size,size]) images.append(image) if modeltype == 'ryb_g': image = tf.image.adjust_contrast(img, 0.55) images.append(image) images = tf.expand_dims(images, axis=0) images = np.vstack(images) return images def image_predictions_exp(images, model, modeltype, TTArepeat=False, batch_size=8, size=128): images = np.expand_dims(images, axis=0) images = np.vstack(images) confidence = model.predict(images,batch_size=batch_size) if TTArepeat: TTApred = [] aug_images = data_aug_exp(images,modeltype,size) for img in aug_images: TTApred.append(model.predict(img)) confidence = np.mean(TTApred,axis=0) return confidence def weighted_predictions(pred1, wt1, pred2=0, wt2=0, pred3=0, wt3=0): new_pred = pred1*wt1+pred2*wt2+pred3*wt3 return np.array(new_pred) def normalization(array): a =(array - array.min())/(array.max() -array.min()) return a def add_neglabel(array, labelqty): arraynew = [] maximum = array.max(axis = 1) array = np.insert(array, labelqty, 1-maximum, axis = 1) return array def print_cell(image, title, index): plt.subplot(1,10,index) plt.imshow(image) plt.title(title) plt.axis('off') <define_variables>
model.evaluate(X_test, y_test)
Digit Recognizer
13,825,967
start = time.time()
test_dir = '../input/hpa-single-cell-image-classification/test/'
test_images = os.listdir(test_dir)
images = [i.split("_")[0] for i in test_images]
names = np.unique(images)
public = len(names) == 559
if public:
    print('...only public testset...')
    names = names[0:2]<save_to_csv>
pred_Data = np.array(pd.read_csv('../input/digit-recognizer/test.csv') / 255.)
X_pred = pred_Data.reshape((-1, 28, 28, 1))
Digit Recognizer
13,825,967
sub.to_csv("/kaggle/working/submission.csv", index=False)<import_modules>
predictions = model.predict_classes(X_pred)
Digit Recognizer
13,825,967
<prepare_x_and_y><EOS>
submit = pd.DataFrame({'ImageId': range(1, len(predictions) + 1), 'Label': predictions})
submit.to_csv('submission.csv', index=False)
Digit Recognizer
13,423,365
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<import_modules>
warnings.filterwarnings('ignore')
sns.set_context("paper", font_scale=1, rc={"grid.linewidth": 3})
pd.set_option('display.max_rows', 100, 'display.max_columns', 400)
Digit Recognizer
13,423,365
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.calibration import CalibratedClassifierCV<split>
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
sample_data = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
Digit Recognizer
13,423,365
np.random.seed(0)
n_folds = 10
shuffle = False
if shuffle:
    idx = np.random.permutation(y.size)
    X = X[idx]
    y = y[idx]
skf = list(StratifiedKFold(n_folds).split(X, y))
clfs = [RandomForestClassifier(n_estimators=1000, n_jobs=-1, criterion='gini'),
        RandomForestClassifier(n_estimators=1000, n_jobs=-1, criterion='entropy'),
        ExtraTreesClassifier(n_estimators=1000, n_jobs=-1, criterion='gini'),
        ExtraTreesClassifier(n_estimators=1000, n_jobs=-1, criterion='entropy'),
        GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=600)]
clfs = [CalibratedClassifierCV(clf, method='isotonic', cv=StratifiedKFold(5)) for clf in clfs]
# out-of-fold predictions become the features for a second-level (blending) model
dataset_blend_train = np.zeros((X.shape[0], len(clfs)))
dataset_blend_test = np.zeros((X_submission.shape[0], len(clfs)))
for clf_idx, clf in enumerate(clfs):
    print(clf_idx, clf)
    dataset_blend_test_cv = np.zeros((X_submission.shape[0], len(skf)))
    for fold_idx, (train_idxs, test_idxs) in enumerate(skf):
        print('Fold', fold_idx)
        X_train, y_train = X[train_idxs], y[train_idxs]
        X_test, y_test = X[test_idxs], y[test_idxs]
        clf.fit(X_train, y_train)
        dataset_blend_train[test_idxs, clf_idx] = clf.predict_proba(X_test)[:, 1]
        dataset_blend_test_cv[:, fold_idx] = clf.predict_proba(X_submission)[:, 1]
    dataset_blend_test[:, clf_idx] = dataset_blend_test_cv.mean(1)<find_best_model_class>
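Note that the loop above keeps only predict_proba(...)[:, 1], the convention of the binary blending script it descends from; for the 10-class digit task a natural variant (a sketch, not what this kernel runs) stacks the full probability matrix:

n_classes = 10
blend_train = np.zeros((X.shape[0], len(clfs) * n_classes))
for clf_idx, clf in enumerate(clfs):
    for train_idxs, test_idxs in skf:
        clf.fit(X[train_idxs], y[train_idxs])
        cols = slice(clf_idx * n_classes, (clf_idx + 1) * n_classes)
        blend_train[test_idxs, cols] = clf.predict_proba(X[test_idxs])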
train_df = train_data.iloc[:, 1:].values y_train = train_data.iloc[:, 0].values test_df = test_data.values
Digit Recognizer
13,423,365
clf = LogisticRegression()
clf.fit(dataset_blend_train, y)
y_submission = clf.predict_proba(dataset_blend_test)[:, 1]
y_submission = (y_submission - y_submission.min()) / (y_submission.max() - y_submission.min())
tmp = np.vstack([range(1, len(y_submission) + 1), y_submission]).T
np.savetxt(fname='submission.csv', X=tmp, fmt='%d,%0.9f', header='MoleculeId,PredictedProbability', comments='')<install_modules>
img_tform_1 = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor(), transforms.Normalize((0.5), (0.5))])
img_tform_2 = transforms.Compose([transforms.ToPILImage(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize((0.5), (0.5))])
img_tform_3 = transforms.Compose([transforms.ToPILImage(), transforms.RandomRotation(20), transforms.ToTensor(), transforms.Normalize((0.5), (0.5))])
img_tform_4 = transforms.Compose([transforms.ToPILImage(), transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.85, 0.85)), transforms.ToTensor(), transforms.Normalize((0.5), (0.5))])
img_tform_5 = transforms.Compose([transforms.ToPILImage(), transforms.RandomAffine(0, shear=30, scale=[1.15, 1.15]), transforms.ToTensor(), transforms.Normalize((0.5), (0.5))])
img_tform_6 = transforms.Compose([transforms.ToPILImage(), transforms.RandomAffine(0, shear=20, scale=[0.8, 0.8]), transforms.ToTensor(), transforms.Normalize((0.5), (0.5))])
img_tform_7 = transforms.Compose([transforms.ToPILImage(), transforms.RandomAffine(degrees=30, scale=(1.2, 1.2)), transforms.ToTensor(), transforms.Normalize((0.5), (0.5))])
Digit Recognizer
13,423,365
!pip install /kaggle/input/kerasapplications -q
!pip install /kaggle/input/efficientnet-keras-source-code/ -q --no-deps<install_modules>
class MnistDataset(Dataset):
    def __init__(self, features, transform=img_tform_1):
        self.features = features.iloc[:, 1:].values.reshape((-1, 28, 28)).astype(np.uint8)
        self.targets = torch.from_numpy(features.label.values)
        self.transform = transform

    def __len__(self):
        return self.features.shape[0]

    def __getitem__(self, idx):
        return self.transform(self.features[idx]), self.targets[idx]

class TestDataset(Dataset):
    def __init__(self, features, transform=img_tform_1):
        self.features = features.values.reshape((-1, 28, 28)).astype(np.uint8)
        self.targets = None
        self.transform = transform

    def __len__(self):
        return self.features.shape[0]

    def __getitem__(self, idx):
        return self.transform(self.features[idx])
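A sketch of how these datasets are consumed (train_data is the DataFrame read earlier in this kernel):

ds = MnistDataset(train_data)
img, label = ds[0]
print(img.shape, label.item())                   # torch.Size([1, 28, 28]) and the digit label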
Digit Recognizer
13,423,365
print(" ...INSTALLING AND IMPORTING CELL-PROFILER TOOL(HPACELLSEG )... ") try: except: !pip install -q "/kaggle/input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl" !pip install -q "/kaggle/input/hpapytorchzoozip/pytorch_zoo-master" !pip install -q "/kaggle/input/hpacellsegmentatormaster/HPA-Cell-Segmentation-master" print(" ...OTHER IMPORTS STARTING... ") print(" \tVERSION INFORMATION") LBL_NAMES = ["Nucleoplasm", "Nuclear Membrane", "Nucleoli", "Nucleoli Fibrillar Center", "Nuclear Speckles", "Nuclear Bodies", "Endoplasmic Reticulum", "Golgi Apparatus", "Intermediate Filaments", "Actin Filaments", "Microtubules", "Mitotic Spindle", "Centrosome", "Plasma Membrane", "Mitochondria", "Aggresome", "Cytosol", "Vesicles", "Negative"] INT_2_STR = {x:LBL_NAMES[x] for x in np.arange(19)} INT_2_STR_LOWER = {k:v.lower().replace(" ", "_")for k,v in INT_2_STR.items() } STR_2_INT_LOWER = {v:k for k,v in INT_2_STR_LOWER.items() } STR_2_INT = {v:k for k,v in INT_2_STR.items() } FIG_FONT = dict(family="Helvetica, Arial", size=14, color=" LABEL_COLORS = [px.colors.label_rgb(px.colors.convert_to_RGB_255(x)) for x in sns.color_palette("Spectral", len(LBL_NAMES)) ] LABEL_COL_MAP = {str(i):x for i,x in enumerate(LABEL_COLORS)} print(" ...IMPORTS COMPLETE... ") ONLY_PUBLIC = True if ONLY_PUBLIC: print(" ...ONLY INFERRING ON PUBLIC TEST DATA(USING PRE-PROCESSED DF )... ") else: gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "...Physical GPUs,", len(logical_gpus), "Logical GPUs... ") except RuntimeError as e: print(e )<install_modules>
def create_dataloaders(seed, test_size=0.1, df=train_data, batch_size=32):
    train_df, val_df = train_test_split(df, test_size=test_size, random_state=seed)
    # seven differently augmented copies of the training split are concatenated
    train_data_1 = MnistDataset(train_df)
    train_data_2 = MnistDataset(train_df, img_tform_2)
    train_data_3 = MnistDataset(train_df, img_tform_3)
    train_data_4 = MnistDataset(train_df, img_tform_4)
    train_data_5 = MnistDataset(train_df, img_tform_5)
    train_data_6 = MnistDataset(train_df, img_tform_6)
    train_data_7 = MnistDataset(train_df, img_tform_7)
    train_final = ConcatDataset([train_data_1, train_data_2, train_data_3, train_data_4,
                                 train_data_5, train_data_6, train_data_7])
    val_data = MnistDataset(val_df)
    train_loader = torch.utils.data.DataLoader(train_final, batch_size=batch_size, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False)
    return train_loader, valid_loader
Digit Recognizer
13,423,365
!cp -r ../input/focallosstensorflowstablefromartemmavrin/focal-loss-master/* ./
!pip install ./focal-loss-master/<choose_model_class>
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=5, stride=2, padding=14),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            nn.Dropout2d(0.25),
            nn.Conv2d(32, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=6),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            nn.Dropout2d(0.25),
            nn.Conv2d(64, 128, kernel_size=4),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(inplace=True),
            nn.Dropout2d(0.25),
        )
        self.fc = nn.Sequential(
            nn.Linear(128 * 1 * 1, 10)
        )

    def forward(self, x):
        x = self.conv1(x)
        x = x.view(-1, 128 * 1 * 1)
        x = self.fc(x)
        return x
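A shape sanity check for the network above (the padded stride-2 convolutions and pools reduce 28x28 to 1x1 before the 128-to-10 linear head):

net = Model()
logits = net(torch.zeros(8, 1, 28, 28))          # batch of 8 grayscale digits
print(logits.shape)                              # expected: torch.Size([8, 10])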
Digit Recognizer
13,423,365
def binary_focal_loss(gamma=2, alpha=0.25):
    alpha = tf.constant(alpha, dtype=tf.float32)
    gamma = tf.constant(gamma, dtype=tf.float32)

    def binary_focal_loss_fixed(y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        alpha_t = y_true * alpha + (K.ones_like(y_true) - y_true) * (1 - alpha)
        p_t = y_true * y_pred + (K.ones_like(y_true) - y_true) * (K.ones_like(y_true) - y_pred) + K.epsilon()
        focal_loss = -alpha_t * K.pow((K.ones_like(y_true) - p_t), gamma) * K.log(p_t)
        return K.mean(focal_loss)

    return binary_focal_loss_fixed<define_variables>
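A small numeric check of the closure above: with gamma=0 and alpha=0.5 the focal term vanishes and the value reduces to half the usual binary cross-entropy (up to the K.epsilon() stabilizer):

y_true = tf.constant([[1.0], [0.0]])
y_pred = tf.constant([[0.9], [0.2]])
fl = binary_focal_loss(gamma=0, alpha=0.5)(y_true, y_pred)
bce = tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_true, y_pred))
print(float(fl), float(bce) / 2)                 # the two numbers should agree closely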
def train_fn(model, optimizer, scheduler, loss_fn, dataloader, device):
    model.train()
    final_loss = 0
    train_acc = 0
    total = 0
    train_preds = []
    for features, labels in dataloader:
        optimizer.zero_grad()
        inputs, targets = features.to(device), labels.to(device)
        outputs = model(inputs)
        loss = loss_fn(outputs, targets)
        loss.backward()
        optimizer.step()
        scheduler.step()
        total += len(targets)
        final_loss += loss.item()
        train_preds.append(outputs.sigmoid().detach().cpu().numpy())
        _, predicted = torch.max(outputs, 1)
        train_acc += (predicted == targets).sum().item()
    final_loss /= len(dataloader)
    train_preds = np.concatenate(train_preds)
    train_acc = (train_acc / total) * 100
    return final_loss, train_acc

def valid_fn(model, loss_fn, dataloader, device):
    model.eval()
    final_loss = 0
    valid_preds = []
    val_acc = 0
    total = 0
    for features, labels in dataloader:
        inputs, targets = features.to(device), labels.to(device)
        outputs = model(inputs)
        loss = loss_fn(outputs, targets)
        total += len(targets)
        final_loss += loss.item()
        valid_preds.append(outputs.sigmoid().detach().cpu().numpy())
        _, predicted = torch.max(outputs, 1)
        val_acc += (predicted == targets).sum().item()
    final_loss /= len(dataloader)
    valid_preds = np.concatenate(valid_preds)
    val_acc = (val_acc / total) * 100
    return final_loss, valid_preds, val_acc
Digit Recognizer
13,423,365
NUC_MODEL = '/kaggle/input/hpacellsegmentatormodelweights/dpn_unet_nuclei_v1.pth' CELL_MODEL = '/kaggle/input/hpacellsegmentatormodelweights/dpn_unet_cell_3ch_v1.pth' B2_CELL_CLSFR_DIR = "/kaggle/input/hpa-models/resultsv7/ebnet_b2_wdensehead/ckpt-0006-0.0924.ckpt" DATA_DIR = "/kaggle/input/hpa-single-cell-image-classification" TEST_IMG_DIR = os.path.join(DATA_DIR, "test") TEST_IMG_PATHS = sorted([os.path.join(TEST_IMG_DIR, f_name)for f_name in os.listdir(TEST_IMG_DIR)]) print(f"...The number of testing images is {len(TEST_IMG_PATHS)}" \ f" \t--> i.e.{len(TEST_IMG_PATHS)//4} 4-channel images...") PUB_SS_CSV = "/kaggle/input/hpa-sample-submission-with-extra-metadata/updated_sample_submission.csv" SWAP_SS_CSV = os.path.join(DATA_DIR, "sample_submission.csv") ss_df = pd.read_csv(SWAP_SS_CSV) DO_TTA = True TTA_REPEATS = 8 IS_DEMO = len(ss_df)==559 if IS_DEMO: ss_df_1 = ss_df.drop_duplicates("ImageWidth", keep="first") ss_df_2 = ss_df.drop_duplicates("ImageWidth", keep="last") ss_df = pd.concat([ss_df_1, ss_df_2]) del ss_df_1; del ss_df_2; gc.collect() ; print(" SAMPLE SUBMISSION DATAFRAME ") display(ss_df) else: print(" SAMPLE SUBMISSION DATAFRAME ") display(ss_df) if ONLY_PUBLIC: pub_ss_df = pd.read_csv(PUB_SS_CSV) if IS_DEMO: pub_ss_df_1 = pub_ss_df.drop_duplicates("ImageWidth", keep="first") pub_ss_df_2 = pub_ss_df.drop_duplicates("ImageWidth", keep="last") pub_ss_df = pd.concat([pub_ss_df_1, pub_ss_df_2]) pub_ss_df.mask_rles = pub_ss_df.mask_rles.apply(lambda x: ast.literal_eval(x)) pub_ss_df.mask_bboxes = pub_ss_df.mask_bboxes.apply(lambda x: ast.literal_eval(x)) pub_ss_df.mask_sub_rles = pub_ss_df.mask_sub_rles.apply(lambda x: ast.literal_eval(x)) print(" TEST DATAFRAME W/ MASKS ") display(pub_ss_df )<categorify>
DEVICE =('cuda' if torch.cuda.is_available() else 'cpu') EPOCHS = 12 BATCH_SIZE = 128 LEARNING_RATE = 1e-3 WEIGHT_DECAY = 1e-8 seed=42
Digit Recognizer
13,423,365
def binary_mask_to_ascii(mask, mask_val=1): mask = np.where(mask==mask_val, 1, 0 ).astype(np.bool) if mask.dtype != np.bool: raise ValueError(f"encode_binary_mask expects a binary mask, received dtype == {mask.dtype}") mask = np.squeeze(mask) if len(mask.shape)!= 2: raise ValueError(f"encode_binary_mask expects a 2d mask, received shape == {mask.shape}") mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1) mask_to_encode = mask_to_encode.astype(np.uint8) mask_to_encode = np.asfortranarray(mask_to_encode) encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"] binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION) base64_str = base64.b64encode(binary_str) return base64_str.decode() def rle_encoding(img, mask_val=1): dots = np.where(img.T.flatten() == mask_val)[0] run_lengths = [] prev = -2 for b in dots: if(b>prev+1): run_lengths.extend(( b + 1, 0)) run_lengths[-1] += 1 prev = b return ' '.join([str(x)for x in run_lengths]) def rle_to_mask(rle_string, height, width): rows,cols = height,width rle_numbers = [int(num_string)for num_string in rle_string.split(' ')] rle_pairs = np.array(rle_numbers ).reshape(-1,2) img = np.zeros(rows*cols,dtype=np.uint8) for index,length in rle_pairs: index -= 1 img[index:index+length] = 255 img = img.reshape(cols,rows) img = img.T return img def decode_img(img, img_size=(224,224), testing=False): if not testing: img = tf.image.decode_png(img, channels=1) return tf.cast(tf.image.resize(img, img_size), tf.uint8) else: return tf.image.decode_png(img, channels=1) def preprocess_path_ds(rp, gp, bp, yp, lbl, n_classes=19, img_size=(224,224), combine=True, drop_yellow=True): ri = decode_img(tf.io.read_file(rp), img_size) gi = decode_img(tf.io.read_file(gp), img_size) bi = decode_img(tf.io.read_file(bp), img_size) yi = decode_img(tf.io.read_file(yp), img_size) if combine and drop_yellow: return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0]], axis=-1), tf.one_hot(lbl, n_classes, dtype=tf.uint8) elif combine: return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0], yi[..., 0]], axis=-1), tf.one_hot(lbl, n_classes, dtype=tf.uint8) elif drop_yellow: return ri, gi, bi, tf.one_hot(lbl, n_classes, dtype=tf.uint8) else: return ri, gi, bi, yi, tf.one_hot(lbl, n_classes, dtype=tf.uint8) def create_pred_col(row): if pd.isnull(row.PredictionString_y): return row.PredictionString_x else: return row.PredictionString_y def load_image(img_id, img_dir, testing=False, only_public=False): if only_public: return_axis = -1 clr_list = ["red", "green", "blue"] else: return_axis = 0 clr_list = ["red", "green", "blue", "yellow"] if not testing: rgby = [ np.asarray(Image.open(os.path.join(img_dir, img_id+f"_{c}.png")) , np.uint8)\ for c in ["red", "green", "blue", "yellow"] ] return np.stack(rgby, axis=-1) else: return np.stack( [np.asarray(decode_img(tf.io.read_file(os.path.join(img_dir, img_id+f"_{c}.png")) , testing=True), np.uint8)[..., 0] \ for c in clr_list], axis=return_axis, ) def plot_rgb(arr, figsize=(12,12)) : plt.figure(figsize=figsize) plt.title(f"RGB Composite Image", fontweight="bold") plt.imshow(arr) plt.axis(False) plt.show() def convert_rgby_to_rgb(arr): rgb_arr = np.zeros_like(arr[..., :-1]) rgb_arr[..., 0] = arr[..., 0] rgb_arr[..., 1] = arr[..., 1]+arr[..., 3]/2 rgb_arr[..., 2] = arr[..., 2] return rgb_arr def plot_ex(arr, figsize=(20,6), title=None, plot_merged=True, rgb_only=False): if plot_merged and not rgb_only: n_images=5 elif plot_merged and rgb_only: n_images=4 elif not plot_merged and rgb_only: n_images=4 else: n_images=3 
plt.figure(figsize=figsize) if type(title)== str: plt.suptitle(title, fontsize=20, fontweight="bold") for i, c in enumerate(["Red Channel – Microtubules", "Green Channel – Protein of Interest", "Blue - Nucleus", "Yellow – Endoplasmic Reticulum"]): if not rgb_only: ch_arr = np.zeros_like(arr[..., :-1]) else: ch_arr = np.zeros_like(arr) if c in ["Red Channel – Microtubules", "Green Channel – Protein of Interest", "Blue - Nucleus"]: ch_arr[..., i] = arr[..., i] else: if rgb_only: continue ch_arr[..., 0] = arr[..., i] ch_arr[..., 1] = arr[..., i] plt.subplot(1,n_images,i+1) plt.title(f"{c.title() }", fontweight="bold") plt.imshow(ch_arr) plt.axis(False) if plot_merged: plt.subplot(1,n_images,n_images) if rgb_only: plt.title(f"Merged RGB", fontweight="bold") plt.imshow(arr) else: plt.title(f"Merged RGBY into RGB", fontweight="bold") plt.imshow(convert_rgby_to_rgb(arr)) plt.axis(False) plt.tight_layout(rect=[0, 0.2, 1, 0.97]) plt.show() def flatten_list_of_lists(l_o_l, to_string=False): if not to_string: return [item for sublist in l_o_l for item in sublist] else: return [str(item)for sublist in l_o_l for item in sublist] def create_segmentation_maps(list_of_image_lists, segmentator, batch_size=8): all_mask_rles = {} for i in tqdm(range(0, len(list_of_image_lists[0]), batch_size), total=len(list_of_image_lists[0])//batch_size): sub_images = [img_channel_list[i:i+batch_size] for img_channel_list in list_of_image_lists] cell_segmentations = segmentator.pred_cells(sub_images) nuc_segmentations = segmentator.pred_nuclei(sub_images[2]) for j, path in enumerate(sub_images[0]): img_id = path.replace("_red.png", "" ).rsplit("/", 1)[1] nuc_mask, cell_mask = label_cell(nuc_segmentations[j], cell_segmentations[j]) new_name = os.path.basename(path ).replace('red','mask') all_mask_rles[img_id] = [rle_encoding(cell_mask, mask_val=k)for k in range(1, np.max(cell_mask)+1)] return all_mask_rles def get_img_list(img_dir, return_ids=False, sub_n=None): if sub_n is None: sub_n=len(glob(img_dir + '/' + f'*_red.png')) if return_ids: images = [sorted(glob(img_dir + '/' + f'*_{c}.png')) [:sub_n] for c in ["red", "yellow", "blue"]] return [x.replace("_red.png", "" ).rsplit("/", 1)[1] for x in images[0]], images else: return [sorted(glob(img_dir + '/' + f'*_{c}.png')) [:sub_n] for c in ["red", "yellow", "blue"]] def get_contour_bbox_from_rle(rle, width, height, return_mask=True,): mask = rle_to_mask(rle, height, width ).copy() cnts = grab_contours( cv2.findContours( mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )) x,y,w,h = cv2.boundingRect(cnts[0]) if return_mask: return(x,y,x+w,y+h), mask else: return(x,y,x+w,y+h) def get_contour_bbox_from_raw(raw_mask): cnts = grab_contours( cv2.findContours( raw_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )) xywhs = [cv2.boundingRect(cnt)for cnt in cnts] xys = [(xywh[0], xywh[1], xywh[0]+xywh[2], xywh[1]+xywh[3])for xywh in xywhs] return sorted(xys, key=lambda x:(x[1], x[0])) def pad_to_square(a): if a.shape[1]>a.shape[0]: n_to_add = a.shape[1]-a.shape[0] top_pad = n_to_add//2 bottom_pad = n_to_add-top_pad a = np.pad(a, [(top_pad, bottom_pad),(0, 0),(0, 0)], mode='constant') elif a.shape[0]>a.shape[1]: n_to_add = a.shape[0]-a.shape[1] left_pad = n_to_add//2 right_pad = n_to_add-left_pad a = np.pad(a, [(0, 0),(left_pad, right_pad),(0, 0)], mode='constant') else: pass return a def cut_out_cells(rgby, rles, resize_to=(256,256), square_off=True, return_masks=False, from_raw=True): w,h = rgby.shape[:2] contour_bboxes = [get_contour_bbox_from_rle(rle, w, h, return_mask=return_masks)for rle in 
rles] if return_masks: masks = [x[-1] for x in contour_bboxes] contour_bboxes = [x[:-1] for x in contour_bboxes] arrs = [rgby[bbox[1]:bbox[3], bbox[0]:bbox[2],...] for bbox in contour_bboxes] if square_off: arrs = [pad_to_square(arr)for arr in arrs] if resize_to is not None: arrs = [ cv2.resize(pad_to_square(arr ).astype(np.float32), resize_to, interpolation=cv2.INTER_CUBIC)\ for arr in arrs ] if return_masks: return arrs, masks else: return arrs def grab_contours(cnts): if len(cnts)== 2: cnts = cnts[0] elif len(cnts)== 3: cnts = cnts[1] else: raise Exception(( "Contours tuple must have length 2 or 3, " "otherwise OpenCV changed their cv2.findContours return " "signature yet again.Refer to OpenCV's documentation " "in that case")) return cnts def preprocess_row(img_id, img_w, img_h, combine=True, drop_yellow=True): rp = os.path.join(TEST_IMG_DIR, img_id+"_red.png") gp = os.path.join(TEST_IMG_DIR, img_id+"_green.png") bp = os.path.join(TEST_IMG_DIR, img_id+"_blue.png") yp = os.path.join(TEST_IMG_DIR, img_id+"_yellow.png") ri = decode_img(tf.io.read_file(rp),(img_w, img_h), testing=True) gi = decode_img(tf.io.read_file(gp),(img_w, img_h), testing=True) bi = decode_img(tf.io.read_file(bp),(img_w, img_h), testing=True) if not drop_yellow: yi = decode_img(tf.io.read_file(yp),(img_w, img_h), testing=True) if combine and drop_yellow: return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0]], axis=-1) elif combine: return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0], yi[..., 0]], axis=-1) elif drop_yellow: return ri, gi, bi else: return ri, gi, bi, yi def plot_predictions(img, masks, preds, confs=None, fill_alpha=0.3, lbl_as_str=True): FONT = cv2.FONT_HERSHEY_SIMPLEX; FONT_SCALE = 0.7; FONT_THICKNESS = 2; FONT_LINE_TYPE = cv2.LINE_AA; COLORS = [[round(y*255)for y in x] for x in sns.color_palette("Spectral", len(LBL_NAMES)) ] to_plot = img.copy() cntr_img = img.copy() if confs==None: confs = [None,]*len(masks) cnts = grab_contours( cv2.findContours( masks, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )) cnts = sorted(cnts, key=lambda x:(cv2.boundingRect(x)[1], cv2.boundingRect(x)[0])) for c, pred, conf in zip(cnts, preds, confs): color = COLORS[pred[0]] if not lbl_as_str: classes = "CLS=["+",".join([str(p)for p in pred])+"]" else: classes = ", ".join([INT_2_STR[p] for p in pred]) M = cv2.moments(c) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) text_width, text_height = cv2.getTextSize(classes, FONT, FONT_SCALE, FONT_THICKNESS)[0] cv2.drawContours(to_plot, [c], contourIdx=-1, color=[max(0, x-40)for x in color], thickness=10) cv2.drawContours(cntr_img, [c], contourIdx=-1, color=(color), thickness=-1) cv2.putText(to_plot, classes,(cx-text_width//2,cy-text_height//2), FONT, FONT_SCALE, [min(255, x+40)for x in color], FONT_THICKNESS, FONT_LINE_TYPE) cv2.addWeighted(cntr_img, fill_alpha, to_plot, 1-fill_alpha, 0, to_plot) plt.figure(figsize=(16,16)) plt.imshow(to_plot) plt.axis(False) plt.show() def tta(original_img_batch, repeats=4): tta_img_batches = [original_img_batch,] for i in range(repeats): img_batch = original_img_batch SEED = tf.random.uniform(( 2,), minval=0, maxval=100, dtype=tf.dtypes.int32) K = tf.random.uniform(( 1,), minval=0, maxval=4, dtype=tf.dtypes.int32)[0] img_batch = tf.image.stateless_random_flip_left_right(img_batch, SEED) img_batch = tf.image.stateless_random_flip_up_down(img_batch, SEED) img_batch = tf.image.rot90(img_batch, K) img_batch = tf.image.stateless_random_saturation(img_batch, 0.9, 1.1, SEED) img_batch = tf.image.stateless_random_brightness(img_batch, 0.075, SEED) 
img_batch = tf.image.stateless_random_contrast(img_batch, 0.9, 1.1, SEED) tta_img_batches.append(img_batch) return tta_img_batches<define_variables>
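As a quick sanity check, a tiny round-trip sketch for the rle_encoding / rle_to_mask helpers defined above (values worked out by hand; assumes both helpers and numpy are in scope):

import numpy as np

mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1                        # a 2x2 blob
rle = rle_encoding(mask, mask_val=1)      # "6 2 10 2": column-major runs, 1-indexed
restored = rle_to_mask(rle, height=4, width=4)
assert ((restored > 0) == (mask > 0)).all()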
def run_training(seed): train_loader, valid_loader= create_dataloaders(seed=seed) model=Model() model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters() , lr=LEARNING_RATE,weight_decay=WEIGHT_DECAY) scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e2, max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(train_loader)) loss_fn = nn.CrossEntropyLoss() for epoch in range(EPOCHS): train_loss,train_acc = train_fn(model, optimizer,scheduler, loss_fn, train_loader, DEVICE) print(f"EPOCH: {epoch}, train_loss: {train_loss}, train_accuracy:{train_acc}") val_loss, val_preds, val_acc = valid_fn(model, loss_fn, valid_loader, DEVICE) print(f"EPOCH: {epoch}, valid_loss: {val_loss}, val_accuracy:{val_acc}") test_pred = torch.LongTensor() testdataset = TestDataset(test_data) testloader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False) for features in testloader: features=features.to(DEVICE) outputs=model(features) _, predicted = torch.max(outputs, 1) test_pred = torch.cat(( test_pred.to(DEVICE), predicted.to(DEVICE)) , dim=0) pred_df['predict'] = test_pred.cpu().numpy()
Digit Recognizer
13,423,365
inference_model = tf.keras.models.load_model(B2_CELL_CLSFR_DIR) IMAGE_SIZES = [1728, 2048, 3072, 4096] BATCH_SIZE = 20 CONF_THRESH = 0.0 TILE_SIZE =(224,224) if ONLY_PUBLIC: predict_df_1728 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[0]] predict_df_2048 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[1]] predict_df_3072 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[2]] predict_df_4096 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[3]] else: segmentator = cellsegmentator.CellSegmentator(NUC_MODEL, CELL_MODEL, scale_factor=0.275, padding=True) predict_df_1728 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[0]] predict_df_2048 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[1]] predict_df_3072 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[2]] predict_df_4096 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[3]] predict_ids_1728 = predict_df_1728.ID.to_list() predict_ids_2048 = predict_df_2048.ID.to_list() predict_ids_3072 = predict_df_3072.ID.to_list() predict_ids_4096 = predict_df_4096.ID.to_list()<create_dataframe>
pred_df = sample_data.copy() run_training(seed)
Digit Recognizer
13,423,365
predictions = [] sub_df = pd.DataFrame(columns=["ID"], data=predict_ids_1728+predict_ids_2048+predict_ids_3072+predict_ids_4096) for size_idx, submission_ids in enumerate([predict_ids_1728, predict_ids_2048, predict_ids_3072, predict_ids_4096]): size = IMAGE_SIZES[size_idx] if submission_ids==[]: print(f" ...SKIPPING SIZE {size} AS THERE ARE NO IMAGE IDS... ") continue else: print(f" ...WORKING ON IMAGE IDS FOR SIZE {size}... ") for i in tqdm(range(0, len(submission_ids), BATCH_SIZE), total=int(np.ceil(len(submission_ids)/BATCH_SIZE))): batch_rgby_images = [ load_image(ID, TEST_IMG_DIR, testing=True, only_public=ONLY_PUBLIC)\ for ID in submission_ids[i:(i+BATCH_SIZE)] ] if ONLY_PUBLIC: batch_cell_bboxes = pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_bboxes.values batch_rgb_images = batch_rgby_images submission_rles = pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_sub_rles.values if IS_DEMO: batch_masks = [ sum([rle_to_mask(mask, size, size)for mask in batch])\ for batch in pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_rles.values ] else: cell_segmentations = segmentator.pred_cells([[rgby_image[j] for rgby_image in batch_rgby_images] for j in [0, 3, 2]]) nuc_segmentations = segmentator.pred_nuclei([rgby_image[2] for rgby_image in batch_rgby_images]) batch_masks = [label_cell(nuc_seg, cell_seg)[1].astype(np.uint8)for nuc_seg, cell_seg in zip(nuc_segmentations, cell_segmentations)] batch_rgb_images = [rgby_image.transpose(1,2,0)[..., :-1] for rgby_image in batch_rgby_images] batch_cell_bboxes = [get_contour_bbox_from_raw(mask)for mask in batch_masks] submission_rles = [[binary_mask_to_ascii(mask, mask_val=cell_id)for cell_id in range(1, mask.max() +1)] for mask in batch_masks] batch_cell_tiles = [[ cv2.resize( pad_to_square( rgb_image[bbox[1]:bbox[3], bbox[0]:bbox[2],...]), TILE_SIZE, interpolation=cv2.INTER_CUBIC)for bbox in bboxes] for bboxes, rgb_image in zip(batch_cell_bboxes, batch_rgb_images) ] if DO_TTA: tta_batch_cell_tiles = [tta(tf.cast(ct, dtype=tf.float32), repeats=TTA_REPEATS)for ct in batch_cell_tiles] else: batch_cell_tiles = [tf.cast(ct, dtype=tf.float32)for ct in batch_cell_tiles] if DO_TTA: tta_batch_o_preds = [[inference_model.predict(ct)for ct in bct] for bct in tta_batch_cell_tiles] batch_o_preds = [tf.keras.layers.Average()(tta_o_preds ).numpy() for tta_o_preds in tta_batch_o_preds] else: batch_o_preds = [inference_model.predict(cell_tiles)for cell_tiles in batch_cell_tiles] batch_confs = [[pred[np.where(pred>CONF_THRESH)] for pred in o_preds] for o_preds in batch_o_preds] batch_preds = [[np.where(pred>CONF_THRESH)[0] for pred in o_preds] for o_preds in batch_o_preds] for j, preds in enumerate(batch_preds): for k in range(len(preds)) : if preds[k].size==0: batch_preds[j][k]=np.array([18,]) batch_confs[j][k]=np.array([1-np.max(batch_o_preds[j][k]),]) if IS_DEMO: print(" ...DEMO IMAGES... 
") for rgb_images, masks, preds, confs in zip(batch_rgb_images, batch_masks, batch_preds, batch_confs): plot_predictions(rgb_images, masks, preds, confs=confs, fill_alpha=0.2, lbl_as_str=True) submission_rles = [flatten_list_of_lists([[m,]*len(p)for m, p in zip(masks, preds)])for masks, preds in zip(submission_rles, batch_preds)] batch_preds = [flatten_list_of_lists(preds, to_string=True)for preds in batch_preds] batch_confs = [[f"{conf:.4f}" for cell_confs in confs for conf in cell_confs] for confs in batch_confs] predictions.extend([" ".join(flatten_list_of_lists(zip(*[preds,confs,masks])))for preds, confs, masks in zip(batch_preds, batch_confs, submission_rles)]) sub_df["PredictionString"] = predictions print(" ...TEST DATAFRAME... ") display(sub_df.head(3))<save_to_csv>
final_pred = pred_df['predict'] sample_data.Label = final_pred.astype(int) sample_data.head()
Digit Recognizer
13,423,365
ss_df = ss_df.merge(sub_df, how="left", on="ID") ss_df["PredictionString"] = ss_df.apply(create_pred_col, axis=1) ss_df = ss_df.drop(columns=["PredictionString_x", "PredictionString_y"]) display(ss_df) torch.cuda.empty_cache()<categorify>
sample_data.to_csv('./submission.csv', index=False )
Digit Recognizer
13,673,359
def auto_select_accelerator() : try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print("Running on TPU:", tpu.master()) except ValueError: strategy = tf.distribute.get_strategy() print(f"Running on {strategy.num_replicas_in_sync} replicas") return strategy def build_decoder(with_labels=True, target_size=(300, 300), ext='jpg'): def decode(path): file_bytes = tf.io.read_file(path) if ext == 'png': img = tf.image.decode_png(file_bytes, channels=3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels=3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32)/ 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels=True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else(paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls=AUTO) dset = dset.cache(cache_dir)if cache else dset dset = dset.map(augment_fn, num_parallel_calls=AUTO)if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle)if shuffle else dset dset = dset.batch(bsize ).prefetch(AUTO) return dset<feature_engineering>
data_train = pd.read_csv(".. /input/digit-recognizer/train.csv") X_test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
13,673,359
HPA_MODELS = False COMPETITION_NAME = "hpa-single-cell-image-classification" strategy = auto_select_accelerator() BATCH_SIZE = strategy.num_replicas_in_sync * 16 IMSIZE =(224, 240, 260, 300, 380, 456, 528, 600, 720) load_dir = f"/kaggle/input/{COMPETITION_NAME}/" sub_df = pd.read_csv('.. /input/hpa-single-cell-image-classification/sample_submission.csv') sub_df = sub_df.drop(sub_df.columns[1:],axis=1) for i in range(19): sub_df[f'{i}'] = pd.Series(np.zeros(sub_df.shape[0])) if HPA_MODELS: colours = ['blue', 'green', 'red', 'yellow'] sub_dfs = [] for colour in colours: test_paths = load_dir + "/test/" + sub_df['ID'] + '_' + colour + '.png' label_cols = sub_df.columns[1:] test_decoder = build_decoder(with_labels=False, target_size=(IMSIZE[-1], IMSIZE[-1])) dtest = build_dataset( test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder ) with strategy.scope() : model = tf.keras.models.load_model( f'.. /input/hpa-models/effb7_individual_model_{colour}.h5' ) model.summary() sub_df[label_cols] = model.predict(dtest, verbose=1) sub_dfs.append(sub_df) if colour == 'yellow': sub_df = pd.concat(( sub_df.ID,( sub_dfs[0].iloc[:, 1:] + sub_dfs[1].iloc[:, 1:] + sub_dfs[2].iloc[:, 1:] + sub_dfs[3].iloc[:, 1:] )/4), axis=1) else: test_paths = load_dir + "/test/" + sub_df['ID'] + '_' + 'green' + '.png' label_cols = sub_df.columns[1:] test_decoder = build_decoder(with_labels=False, target_size=(IMSIZE[-1], IMSIZE[-1])) dtest = build_dataset( test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder ) with strategy.scope() : model = tf.keras.models.load_model('.. /input/hpa-models/HPA classification efnb7 train 13cc0d 20/model_green.h5') model.summary() sub_df[label_cols] = model.predict(dtest, verbose=1) ss_df = pd.merge(ss_df, sub_df, on = 'ID', how = 'left') for row in range(ss_df.shape[0]): pred = ss_df.loc[row,'PredictionString'] pred_split = pred.split() for j in range(int(len(pred_split)/3)) : for k in range(19): if int(pred_split[ 3*j ])== k: p = pred_split[ 3*j + 1 ] pred_split[ 3*j + 1 ] = str(ss_df.loc[row, f'{k}']*0.6 + float(p)*0.4) ss_df.loc[row,'PredictionString'] = ' '.join(pred_split) ss_df = ss_df[['ID','ImageWidth','ImageHeight','PredictionString']] ss_df.to_csv('submission.csv',index = False) print(f'HPA_MODELS = {HPA_MODELS}' )<install_modules>
# Scale to [0, 1] and reshape to (n, 28, 28, 1) so the Conv2D input_shape below matches
X = (data_train.drop(labels=["label"], axis=1) / 255.).values.reshape(-1, 28, 28, 1)
y = data_train["label"].values
X_test = (X_test / 255.).values.reshape(-1, 28, 28, 1)
Digit Recognizer
13,673,359
!pip install /kaggle/input/kerasapplications -q !pip install /kaggle/input/efficientnet-keras-source-code/ -q --no-deps<install_modules>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.3, min_lr=0.00001) early_stopping = EarlyStopping( min_delta=0.000001, patience=20, restore_best_weights=True, )
Digit Recognizer
13,673,359
print(" ...INSTALLING AND IMPORTING CELL-PROFILER TOOL(HPACELLSEG )... ") try: except: !pip install -q "/kaggle/input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl" !pip install -q "/kaggle/input/hpapytorchzoozip/pytorch_zoo-master" !pip install -q "/kaggle/input/hpacellsegmentatormaster/HPA-Cell-Segmentation-master" print(" ...OTHER IMPORTS STARTING... ") print(" \tVERSION INFORMATION") LBL_NAMES = ["Nucleoplasm", "Nuclear Membrane", "Nucleoli", "Nucleoli Fibrillar Center", "Nuclear Speckles", "Nuclear Bodies", "Endoplasmic Reticulum", "Golgi Apparatus", "Intermediate Filaments", "Actin Filaments", "Microtubules", "Mitotic Spindle", "Centrosome", "Plasma Membrane", "Mitochondria", "Aggresome", "Cytosol", "Vesicles", "Negative"] INT_2_STR = {x:LBL_NAMES[x] for x in np.arange(19)} INT_2_STR_LOWER = {k:v.lower().replace(" ", "_")for k,v in INT_2_STR.items() } STR_2_INT_LOWER = {v:k for k,v in INT_2_STR_LOWER.items() } STR_2_INT = {v:k for k,v in INT_2_STR.items() } FIG_FONT = dict(family="Helvetica, Arial", size=14, color=" LABEL_COLORS = [px.colors.label_rgb(px.colors.convert_to_RGB_255(x)) for x in sns.color_palette("Spectral", len(LBL_NAMES)) ] LABEL_COL_MAP = {str(i):x for i,x in enumerate(LABEL_COLORS)} print(" ...IMPORTS COMPLETE... ") ONLY_PUBLIC = True if ONLY_PUBLIC: print(" ...ONLY INFERRING ON PUBLIC TEST DATA(USING PRE-PROCESSED DF )... ") else: gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "...Physical GPUs,", len(logical_gpus), "Logical GPUs... ") except RuntimeError as e: print(e )<install_modules>
skf = StratifiedKFold(n_splits=3,random_state=42,shuffle=True) sub = pd.DataFrame(data=None, index=(range(1,28001)) , columns=None, dtype=None, copy=False) for train_index, val_index in skf.split(X, y): model = keras.Sequential([ keras.layers.Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same', activation ='relu', input_shape =(28,28,1)) , keras.layers.MaxPool2D(pool_size=(2,2)) , keras.layers.BatchNormalization() , keras.layers.Dropout(0.4), keras.layers.Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'), keras.layers.MaxPool2D(pool_size=(2,2)) , keras.layers.BatchNormalization() , keras.layers.Dropout(0.4), keras.layers.Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same', activation ='relu'), keras.layers.MaxPool2D(pool_size=(2,2)) , keras.layers.BatchNormalization() , keras.layers.Dropout(0.4), keras.layers.Flatten() , keras.layers.Dense(28*28, activation='relu'), keras.layers.BatchNormalization() , keras.layers.Dropout(0.4), keras.layers.Dense(28*28, activation='relu'), keras.layers.BatchNormalization() , keras.layers.Dropout(0.4), keras.layers.Dense(28*28, activation='relu'), keras.layers.Dense(10, activation='softmax') ]) X_train, X_val = X[train_index], X[val_index] y_train, y_val = y[train_index], y[val_index] model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit( x=X_train, y=y_train, batch_size=100, epochs=250, verbose=1, callbacks=[early_stopping,learning_rate_reduction], validation_data=(X_val,y_val), shuffle=True) results = model.predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") sub = pd.concat([sub, results],axis=1)
Digit Recognizer
13,673,359
!cp -r ../input/focallosstensorflowstablefromartemmavrin/focal-loss-master/* ./
!pip install ./focal-loss-master/
sub["result"] = sub.mode(dropna=True,axis=1)[0] result = pd.Series(sub["result"],name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),result],axis = 1) submission = submission.dropna().astype('int32') submission.to_csv("mnist_ansamble_of_cnn.csv",index=False )
Digit Recognizer
14,315,639
def binary_focal_loss(gamma=2, alpha=0.25): alpha = tf.constant(alpha, dtype=tf.float32) gamma = tf.constant(gamma, dtype=tf.float32) def binary_focal_loss_fixed(y_true, y_pred): y_true = tf.cast(y_true, tf.float32) alpha_t = y_true*alpha +(K.ones_like(y_true)-y_true)*(1-alpha) p_t = y_true*y_pred +(K.ones_like(y_true)-y_true)*(K.ones_like(y_true)-y_pred)+ K.epsilon() focal_loss = - alpha_t * K.pow(( K.ones_like(y_true)-p_t),gamma)* K.log(p_t) return K.mean(focal_loss) return binary_focal_loss_fixed<define_variables>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') images_train, images_val = train_test_split(train, test_size=0.3) label_train = images_train['label'] label_val = images_val['label'] images_train = images_train.drop(['label'],axis = 1) images_val = images_val.drop(['label'],axis = 1) label_train = pd.DataFrame(label_train) label_val = pd.DataFrame(label_val) enc = OneHotEncoder() enc.fit(label_train) label_train = enc.transform(label_train).toarray() label_val = enc.transform(label_val).toarray() images_train = images_train.values.astype('float32') images_val = images_val.values.astype('float32') images_train = images_train.reshape(images_train.shape[0], 28, 28, 1) images_val = images_val.reshape(images_val.shape[0], 28, 28, 1)
Digit Recognizer
14,315,639
NUC_MODEL = '/kaggle/input/hpacellsegmentatormodelweights/dpn_unet_nuclei_v1.pth' CELL_MODEL = '/kaggle/input/hpacellsegmentatormodelweights/dpn_unet_cell_3ch_v1.pth' B2_CELL_CLSFR_DIR = "/kaggle/input/hpa-models/HPA - Cellwise Classification TRAINING/ebnet_b2_wdensehead/ckpt-0007-0.0901.ckpt" DATA_DIR = "/kaggle/input/hpa-single-cell-image-classification" TEST_IMG_DIR = os.path.join(DATA_DIR, "test") TEST_IMG_PATHS = sorted([os.path.join(TEST_IMG_DIR, f_name)for f_name in os.listdir(TEST_IMG_DIR)]) print(f"...The number of testing images is {len(TEST_IMG_PATHS)}" \ f" \t--> i.e.{len(TEST_IMG_PATHS)//4} 4-channel images...") PUB_SS_CSV = "/kaggle/input/hpa-sample-submission-with-extra-metadata/updated_sample_submission.csv" SWAP_SS_CSV = os.path.join(DATA_DIR, "sample_submission.csv") ss_df = pd.read_csv(SWAP_SS_CSV) DO_TTA = True TTA_REPEATS = 8 IS_DEMO = len(ss_df)==559 if IS_DEMO: ss_df_1 = ss_df.drop_duplicates("ImageWidth", keep="first") ss_df_2 = ss_df.drop_duplicates("ImageWidth", keep="last") ss_df = pd.concat([ss_df_1, ss_df_2]) del ss_df_1; del ss_df_2; gc.collect() ; print(" SAMPLE SUBMISSION DATAFRAME ") display(ss_df) else: print(" SAMPLE SUBMISSION DATAFRAME ") display(ss_df) if ONLY_PUBLIC: pub_ss_df = pd.read_csv(PUB_SS_CSV) if IS_DEMO: pub_ss_df_1 = pub_ss_df.drop_duplicates("ImageWidth", keep="first") pub_ss_df_2 = pub_ss_df.drop_duplicates("ImageWidth", keep="last") pub_ss_df = pd.concat([pub_ss_df_1, pub_ss_df_2]) pub_ss_df.mask_rles = pub_ss_df.mask_rles.apply(lambda x: ast.literal_eval(x)) pub_ss_df.mask_bboxes = pub_ss_df.mask_bboxes.apply(lambda x: ast.literal_eval(x)) pub_ss_df.mask_sub_rles = pub_ss_df.mask_sub_rles.apply(lambda x: ast.literal_eval(x)) print(" TEST DATAFRAME W/ MASKS ") display(pub_ss_df )<categorify>
model2 = models.Sequential() model2.add(layers.Conv2D(filters = 128, kernel_size=(5, 5), activation='relu', padding='same', input_shape =(28, 28, 1))) model2.add(layers.BatchNormalization()) model2.add(layers.Conv2D(filters = 64, kernel_size=(5, 5), activation='relu', padding='same', input_shape =(28, 28, 1))) model2.add(layers.BatchNormalization()) model2.add(layers.MaxPooling2D(pool_size=(2, 2))) model2.add(layers.Dropout(0.25)) model2.add(layers.Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu')) model2.add(layers.BatchNormalization()) model2.add(layers.Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu')) model2.add(layers.BatchNormalization()) model2.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) model2.add(layers.Dropout(0.25)) model2.add(layers.Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu')) model2.add(layers.BatchNormalization()) model2.add(layers.Dropout(0.25)) model2.add(layers.Flatten()) model2.add(layers.Dense(256, activation='relu')) model2.add(layers.BatchNormalization()) model2.add(layers.Dropout(0.25)) model2.add(layers.Dense(10, activation='softmax')) model2.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics='accuracy') learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001) datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=20, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(images_train) estimator = model2.fit(datagen.flow(images_train,label_train, batch_size = 128), epochs = 50, validation_data =(images_val, label_val), steps_per_epoch=230 ,callbacks=[learning_rate_reduction])
Digit Recognizer
14,315,639
<define_variables><EOS>
images_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') images_test =(images_test.values ).astype('float32') images_test = images_test.reshape(images_test.shape[0], 28, 28, 1) y_pred = model2.predict(images_test) y_pred = pd.DataFrame(y_pred) y_pred = pd.Series(y_pred.idxmax(axis=1),index=y_pred.index) y_pred = pd.DataFrame({"ImageId": list(range(1,len(y_pred)+1)) , "Label": y_pred}) y_pred.to_csv("submission_model2.csv", index=False, header=True )
Digit Recognizer
14,227,432
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<create_dataframe>
import numpy as np import pandas as pd from matplotlib import pyplot as plt
Digit Recognizer
14,227,432
predictions = [] sub_df = pd.DataFrame(columns=["ID"], data=predict_ids_1728+predict_ids_2048+predict_ids_3072+predict_ids_4096) for size_idx, submission_ids in enumerate([predict_ids_1728, predict_ids_2048, predict_ids_3072, predict_ids_4096]): size = IMAGE_SIZES[size_idx] if submission_ids==[]: print(f" ...SKIPPING SIZE {size} AS THERE ARE NO IMAGE IDS... ") continue else: print(f" ...WORKING ON IMAGE IDS FOR SIZE {size}... ") for i in tqdm(range(0, len(submission_ids), BATCH_SIZE), total=int(np.ceil(len(submission_ids)/BATCH_SIZE))): batch_rgby_images = [ load_image(ID, TEST_IMG_DIR, testing=True, only_public=ONLY_PUBLIC)\ for ID in submission_ids[i:(i+BATCH_SIZE)] ] if ONLY_PUBLIC: batch_cell_bboxes = pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_bboxes.values batch_rgb_images = batch_rgby_images submission_rles = pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_sub_rles.values if IS_DEMO: batch_masks = [ sum([rle_to_mask(mask, size, size)for mask in batch])\ for batch in pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_rles.values ] else: cell_segmentations = segmentator.pred_cells([[rgby_image[j] for rgby_image in batch_rgby_images] for j in [0, 3, 2]]) nuc_segmentations = segmentator.pred_nuclei([rgby_image[2] for rgby_image in batch_rgby_images]) batch_masks = [label_cell(nuc_seg, cell_seg)[1].astype(np.uint8)for nuc_seg, cell_seg in zip(nuc_segmentations, cell_segmentations)] batch_rgb_images = [rgby_image.transpose(1,2,0)[..., :-1] for rgby_image in batch_rgby_images] batch_cell_bboxes = [get_contour_bbox_from_raw(mask)for mask in batch_masks] submission_rles = [[binary_mask_to_ascii(mask, mask_val=cell_id)for cell_id in range(1, mask.max() +1)] for mask in batch_masks] batch_cell_tiles = [[ cv2.resize( pad_to_square( rgb_image[bbox[1]:bbox[3], bbox[0]:bbox[2],...]), TILE_SIZE, interpolation=cv2.INTER_CUBIC)for bbox in bboxes] for bboxes, rgb_image in zip(batch_cell_bboxes, batch_rgb_images) ] if DO_TTA: tta_batch_cell_tiles = [tta(tf.cast(ct, dtype=tf.float32), repeats=TTA_REPEATS)for ct in batch_cell_tiles] else: batch_cell_tiles = [tf.cast(ct, dtype=tf.float32)for ct in batch_cell_tiles] if DO_TTA: tta_batch_o_preds = [[inference_model.predict(ct)for ct in bct] for bct in tta_batch_cell_tiles] batch_o_preds = [tf.keras.layers.Average()(tta_o_preds ).numpy() for tta_o_preds in tta_batch_o_preds] else: batch_o_preds = [inference_model.predict(cell_tiles)for cell_tiles in batch_cell_tiles] batch_confs = [[pred[np.where(pred>CONF_THRESH)] for pred in o_preds] for o_preds in batch_o_preds] batch_preds = [[np.where(pred>CONF_THRESH)[0] for pred in o_preds] for o_preds in batch_o_preds] for j, preds in enumerate(batch_preds): for k in range(len(preds)) : if preds[k].size==0: batch_preds[j][k]=np.array([18,]) batch_confs[j][k]=np.array([1-np.max(batch_o_preds[j][k]),]) if IS_DEMO: print(" ...DEMO IMAGES... 
") for rgb_images, masks, preds, confs in zip(batch_rgb_images, batch_masks, batch_preds, batch_confs): plot_predictions(rgb_images, masks, preds, confs=confs, fill_alpha=0.2, lbl_as_str=True) submission_rles = [flatten_list_of_lists([[m,]*len(p)for m, p in zip(masks, preds)])for masks, preds in zip(submission_rles, batch_preds)] batch_preds = [flatten_list_of_lists(preds, to_string=True)for preds in batch_preds] batch_confs = [[f"{conf:.4f}" for cell_confs in confs for conf in cell_confs] for confs in batch_confs] predictions.extend([" ".join(flatten_list_of_lists(zip(*[preds,confs,masks])))for preds, confs, masks in zip(batch_preds, batch_confs, submission_rles)]) sub_df["PredictionString"] = predictions print(" ...TEST DATAFRAME... ") display(sub_df.head(3))<save_to_csv>
train_data = pd.read_csv('../input/digit-recognizer/train.csv') test_data = pd.read_csv('../input/digit-recognizer/test.csv') train_data.head()
Digit Recognizer
14,227,432
ss_df = ss_df.merge(sub_df, how="left", on="ID") ss_df["PredictionString"] = ss_df.apply(create_pred_col, axis=1) ss_df = ss_df.drop(columns=["PredictionString_x", "PredictionString_y"]) display(ss_df) torch.cuda.empty_cache()<categorify>
train_labels = train_data['label'] train_data = train_data.drop('label', axis=1 )
Digit Recognizer
14,227,432
def auto_select_accelerator() : try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print("Running on TPU:", tpu.master()) except ValueError: strategy = tf.distribute.get_strategy() print(f"Running on {strategy.num_replicas_in_sync} replicas") return strategy def build_decoder(with_labels=True, target_size=(300, 300), ext='jpg'): def decode(path): file_bytes = tf.io.read_file(path) if ext == 'png': img = tf.image.decode_png(file_bytes, channels=3) elif ext in ['jpg', 'jpeg']: img = tf.image.decode_jpeg(file_bytes, channels=3) else: raise ValueError("Image extension not supported") img = tf.cast(img, tf.float32)/ 255.0 img = tf.image.resize(img, target_size) return img def decode_with_labels(path, label): return decode(path), label return decode_with_labels if with_labels else decode def build_augmenter(with_labels=True): def augment(img): img = tf.image.random_flip_left_right(img) img = tf.image.random_flip_up_down(img) return img def augment_with_labels(img, label): return augment(img), label return augment_with_labels if with_labels else augment def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=""): if cache_dir != "" and cache is True: os.makedirs(cache_dir, exist_ok=True) if decode_fn is None: decode_fn = build_decoder(labels is not None) if augment_fn is None: augment_fn = build_augmenter(labels is not None) AUTO = tf.data.experimental.AUTOTUNE slices = paths if labels is None else(paths, labels) dset = tf.data.Dataset.from_tensor_slices(slices) dset = dset.map(decode_fn, num_parallel_calls=AUTO) dset = dset.cache(cache_dir)if cache else dset dset = dset.map(augment_fn, num_parallel_calls=AUTO)if augment else dset dset = dset.repeat() if repeat else dset dset = dset.shuffle(shuffle)if shuffle else dset dset = dset.batch(bsize ).prefetch(AUTO) return dset<feature_engineering>
encoder = LabelBinarizer() train_labels = encoder.fit_transform(train_labels )
Digit Recognizer
14,227,432
HPA_MODELS = False COMPETITION_NAME = "hpa-single-cell-image-classification" strategy = auto_select_accelerator() BATCH_SIZE = strategy.num_replicas_in_sync * 16 IMSIZE =(224, 240, 260, 300, 380, 456, 528, 600, 720) load_dir = f"/kaggle/input/{COMPETITION_NAME}/" sub_df = pd.read_csv('.. /input/hpa-single-cell-image-classification/sample_submission.csv') sub_df = sub_df.drop(sub_df.columns[1:],axis=1) for i in range(19): sub_df[f'{i}'] = pd.Series(np.zeros(sub_df.shape[0])) if HPA_MODELS: colours = ['blue', 'green', 'red', 'yellow'] sub_dfs = [] for colour in colours: test_paths = load_dir + "/test/" + sub_df['ID'] + '_' + colour + '.png' label_cols = sub_df.columns[1:] test_decoder = build_decoder(with_labels=False, target_size=(IMSIZE[-1], IMSIZE[-1])) dtest = build_dataset( test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder ) with strategy.scope() : model = tf.keras.models.load_model( f'.. /input/hpa-models/effb7_individual_model_{colour}.h5' ) model.summary() sub_df[label_cols] = model.predict(dtest, verbose=1) sub_dfs.append(sub_df) if colour == 'yellow': sub_df = pd.concat(( sub_df.ID,( sub_dfs[0].iloc[:, 1:] + sub_dfs[1].iloc[:, 1:] + sub_dfs[2].iloc[:, 1:] + sub_dfs[3].iloc[:, 1:] )/4), axis=1) else: test_paths = load_dir + "/test/" + sub_df['ID'] + '_' + 'green' + '.png' label_cols = sub_df.columns[1:] test_decoder = build_decoder(with_labels=False, target_size=(IMSIZE[-1], IMSIZE[-1])) dtest = build_dataset( test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder ) with strategy.scope() : model = tf.keras.models.load_model('.. /input/hpa-models/HPA classification efnb7 train 13cc0d 20/model_green.h5') model.summary() sub_df[label_cols] = model.predict(dtest, verbose=1) ss_df = pd.merge(ss_df, sub_df, on = 'ID', how = 'left') for row in range(ss_df.shape[0]): pred = ss_df.loc[row,'PredictionString'] pred_split = pred.split() for j in range(int(len(pred_split)/3)) : for k in range(19): if int(pred_split[ 3*j ])== k: p = pred_split[ 3*j + 1 ] pred_split[ 3*j + 1 ] = str(ss_df.loc[row, f'{k}']*0.6 + float(p)*0.4) ss_df.loc[row,'PredictionString'] = ' '.join(pred_split) ss_df = ss_df[['ID','ImageWidth','ImageHeight','PredictionString']] ss_df.to_csv('submission.csv',index = False) print(f'HPA_MODELS = {HPA_MODELS}' )<install_modules>
# The frames are flat (n, 784); take .values and restore the 28x28 layout so the channel axis can be added next
train_data = train_data.values.astype('float32').reshape(-1, 28, 28) / 255
test_data = test_data.values.astype('float32').reshape(-1, 28, 28) / 255
Digit Recognizer
14,227,432
!pip install /kaggle/input/iterative-stratification/iterative-stratification-master/<install_modules>
train_data = train_data[:, :, :, np.newaxis] test_data = test_data[:, :, :, np.newaxis]
Digit Recognizer
14,227,432
<define_variables>
X_train, X_val, y_train, y_val = train_test_split(train_data, train_labels, test_size=0.1, random_state=157, stratify=train_labels )
Digit Recognizer
14,227,432
import sys

package_path = '../input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master'
sys.path.append(package_path)
data_generator = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, horizontal_flip=False, vertical_flip=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1 ) data_generator.fit(X_train )
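A quick sanity check for the generator above, assuming X_train / y_train from the previous cells; drawing one augmented batch confirms the shapes the model expects:

xb, yb = next(data_generator.flow(X_train, y_train, batch_size=32))
print(xb.shape, yb.shape)   # (32, 28, 28, 1) (32, 10)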
Digit Recognizer
14,227,432
import pandas as pd import numpy as np from fastai.vision.all import * import pickle import os<import_modules>
init_relu = he_uniform(seed=157) init_tanh = glorot_uniform(seed=157) model = Sequential() model.add(Conv2D(name='Conv_1', input_shape=(28, 28, 1), filters=64, kernel_size=(3, 3), padding='same', kernel_initializer=init_relu, kernel_constraint=maxnorm(3))) model.add(BatchNormalization()) model.add(Activation(relu)) model.add(Conv2D(name='Conv_2', filters=64, kernel_size=(3, 3), padding='same', kernel_initializer=init_relu, kernel_constraint=maxnorm(3))) model.add(BatchNormalization()) model.add(Activation(relu)) model.add(MaxPooling2D(name='Pooling_1', pool_size=(2, 2), strides=2)) model.add(Dropout(0.20)) model.add(Conv2D(name='Conv_4', filters=32, kernel_size=(3, 3), padding='same', kernel_initializer=init_relu, kernel_constraint=maxnorm(3))) model.add(BatchNormalization()) model.add(Activation(relu)) model.add(Conv2D(name='Conv_5', filters=32, kernel_size=(3, 3), padding='same', kernel_initializer=init_relu, kernel_constraint=maxnorm(3))) model.add(BatchNormalization()) model.add(Activation(relu)) model.add(MaxPooling2D(name='Pooling_2', pool_size=(2, 2), strides=2)) model.add(Dropout(0.20)) model.add(Flatten()) model.add(Dense(name='FC_1', units=512, kernel_initializer=init_relu, kernel_constraint=maxnorm(3))) model.add(BatchNormalization()) model.add(Activation(relu)) model.add(Dropout(0.20)) model.add(Dense(name='FC_2', units=128, kernel_initializer=init_relu, kernel_constraint=maxnorm(3))) model.add(BatchNormalization()) model.add(Activation(relu)) model.add(Dense(name='output', units=10, activation='softmax', kernel_initializer=init_tanh, kernel_constraint=maxnorm(3)) )
Digit Recognizer
14,227,432
<define_variables>
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "" os.environ['PYTHONHASHSEED'] = str(157) random.seed(157) np.random.seed(157) tf.compat.v1.set_random_seed(157) session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph() , config=session_conf) k.set_session(sess )
Digit Recognizer
14,227,432
path = Path('../input/hpa-cell-tiles-sample-balanced-dataset')
lr = 0.001 opt = Adam(learning_rate=lr, amsgrad=True) model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) checkpoint = ModelCheckpoint('neural_network_checkpoint_training.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min') lr_decay = LearningRateScheduler(lambda x: lr * 0.95 ** x) tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True) model.fit_generator(data_generator.flow(X_train, y_train, batch_size=64), epochs=50, validation_data=(X_val, y_val), shuffle=True, callbacks=[tensorboard, checkpoint, lr_decay], verbose=1) save_model(checkpoint.model, 'neural_network_latest_saved.h5' )
Digit Recognizer
14,227,432
df = pd.read_csv(path/'cell_df.csv' )<feature_engineering>
clf = load_model('neural_network_checkpoint_training.h5') prediction = clf.predict(test_data) prediction = pd.DataFrame(np.argmax(prediction, axis=-1), columns=['Label']) img_idx = pd.DataFrame(np.arange(1, len(prediction)+ 1), columns=['ImageId']) prediction = pd.concat([img_idx, prediction], axis=1) prediction.to_csv('submission_cnn.csv', index=False )
Digit Recognizer
14,079,394
labels = [str(i)for i in range(19)] for x in labels: df[x] = df['image_labels'].apply(lambda r: int(x in r.split('|')) )<count_unique_values>
!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
Digit Recognizer
14,079,394
unique_counts = {} for lbl in labels: unique_counts[lbl] = len(dfs[dfs.image_labels == lbl]) full_counts = {} for lbl in labels: count = 0 for row_label in dfs['image_labels']: if lbl in row_label.split('|'): count += 1 full_counts[lbl] = count counts = list(zip(full_counts.keys() , full_counts.values() , unique_counts.values())) counts = np.array(sorted(counts, key=lambda x:-x[1])) counts = pd.DataFrame(counts, columns=['label', 'full_count', 'unique_count']) counts.set_index('label' ).T <prepare_x_and_y>
!python pytorch-xla-env-setup.py
Digit Recognizer
14,079,394
nfold = 5 seed = 42 y = dfs[labels].values X = dfs[['image_id', 'cell_id']].values dfs['fold'] = np.nan mskf = MultilabelStratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed) for i,(_, test_index)in enumerate(mskf.split(X, y)) : dfs.iloc[test_index, -1] = i dfs['fold'] = dfs['fold'].astype('int')
!pip install pytorch_lightning --quiet
Digit Recognizer
14,079,394
dfs['is_valid'] = False dfs.loc[dfs['fold'] == 0, 'is_valid'] = True
torch_xla._XLAC._xla_get_devices()
Digit Recognizer
14,079,394
dfs.is_valid.value_counts()<prepare_x_and_y>
torch.manual_seed(100) np.random.seed(100 )
Digit Recognizer
14,079,394
def get_y(r): return r['image_labels'].split('|') get_y(dfs.loc[12] )<define_variables>
df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') df.iloc[:3,:10]
Digit Recognizer
14,079,394
sample_stats =([0.07237246, 0.04476176, 0.07661699], [0.17179589, 0.10284516, 0.14199627] )<define_variables>
df_train, df_val = train_test_split(df, test_size=.1, stratify=df.label )
Digit Recognizer
14,079,394
dls.show_batch(nrows=3, ncols=3 )<choose_model_class>
def toX(df): return df.values.astype(np.float32 ).reshape(-1,1,28,28)/ 127.5 - 1. class MnistDataLoader(object): def __init__(self, df, bs): self.X = toX(df.iloc[:, 1:]) self.y = df.values[:, 0] self.bs = bs self.n_batches = int(np.ceil(df.shape[0] / bs)) def __len__(self): return self.n_batches def __iter__(self): m = self.y.shape[0] for i in range(self.n_batches): idx = np.random.randint(0, m, self.bs) X = torch.tensor(self.X[idx]) y = torch.tensor(self.y[idx]) yield X, y
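A minimal sketch of pulling one batch from this loader (assumes df_train from the split above and torch in scope):

train_dl = MnistDataLoader(df_train, bs=256)
print(len(train_dl))          # batches per epoch
Xb, yb = next(iter(train_dl))
print(Xb.shape, yb.dtype)     # torch.Size([256, 1, 28, 28]) torch.int64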
Digit Recognizer
14,079,394
def get_learner(lr=1e-3): opt_func = partial(Adam, lr=lr, wd=0.01, eps=1e-8) model = EfficientNet.from_pretrained("efficientnet-b5", advprop=True) model._fc = nn.Linear(2048, dls.c) learn = Learner( dls, model, opt_func=opt_func, metrics=[accuracy_multi, PrecisionMulti() ] ).to_fp16() return learn <choose_model_class>
class ResnetBlock(nn.Module): def __init__(self, channels): super(ResnetBlock, self ).__init__() self.conv = nn.Sequential( nn.Conv2d(channels, channels, 3, 1, 1, bias=False), nn.BatchNorm2d(channels), nn.ReLU() , nn.Conv2d(channels, channels, 3, 1, 1, bias=False), nn.BatchNorm2d(channels)) def forward(self, x): x = F.relu(self.conv(x)+ x) x = F.max_pool2d(x, 2) return F.dropout2d(x,.2, self.training) class MiniResnet(nn.Module): def __init__(self, channels=32): super(MiniResnet, self ).__init__() self.backbone = nn.Sequential( nn.Conv2d(1, channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(channels), nn.ReLU() , ResnetBlock(channels), ResnetBlock(channels) ) self.head = nn.Sequential( nn.Linear(channels*49, 256), nn.ReLU() , nn.Dropout (.2), nn.Linear(256,10)) def forward(self, x): x = self.backbone(x) dims = np.prod(x.shape[1:]) logits = self.head(x.view(-1, dims)) return logits
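A quick shape check for the MiniResnet above (assumes the torch/numpy imports used by the classes are in scope): two stride-2 max-pools take 28x28 down to 7x7, so the flattened backbone output is channels * 49, matching the head's first Linear layer.

import torch

net = MiniResnet(channels=32)
dummy = torch.randn(4, 1, 28, 28)   # batch of 4 fake MNIST images
print(net(dummy).shape)             # torch.Size([4, 10])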
Digit Recognizer
14,079,394
learn=get_learner()<find_best_params>
class MnistModule(pl.LightningModule): def __init__(self): super().__init__() self.model = MiniResnet() self.loss_fn = nn.CrossEntropyLoss() def forward(self, X): return self.model(X) def training_step(self, batch, batch_i): X, y = batch h = self.model(X) loss = self.loss_fn(h, y) self.log('train_loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True) return loss def validation_step(self, batch, batch_idx): x, y = batch h = self.model(x) loss = self.loss_fn(h, y) accuracy =(h.argmax(1)== y ).float().mean() self.log('valid_loss', loss, on_step=False, on_epoch=True, logger=True) self.log('accuracy', accuracy, on_step=False, on_epoch=True, logger=True) def configure_optimizers(self): opt = torch.optim.Adam(self.parameters()) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, patience=2) return [opt], [{ 'scheduler': scheduler, 'monitor': 'valid_loss'}] def predict(self, X): with torch.no_grad() : self.eval() logits = self.model(X) return F.softmax(logits, 1) batch_size = 512 train_dl = MnistDataLoader(df_train, batch_size) val_dl = MnistDataLoader(df_val, batch_size) module = MnistModule() trainer = pl.Trainer(max_epochs=30, tpu_cores=1,logger=SimpleLogger()) trainer.fit(module, train_dl, val_dl )
Digit Recognizer
14,079,394
learn.lr_find()<define_search_space>
x = torch.tensor(val_dl.X, device=module.device) h = module.predict(x ).cpu().numpy()
Digit Recognizer
14,079,394
lr=3e-2<find_best_params>
y_hat = h.argmax(1) (y_hat == val_dl.y ).mean()
Digit Recognizer
14,079,394
learn.fine_tune(6,base_lr=lr )<import_modules>
df_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') df_sub = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv' )
Digit Recognizer
14,079,394
from sklearn.metrics import multilabel_confusion_matrix as cm<split>
X = torch.tensor(toX(df_test), device = module.device) %time preds = module.predict(X )
Digit Recognizer
14,079,394
val_targ = dfs[labels][dfs.is_valid == True].values<predict_on_test>
df_sub['Label'] = preds.argmax(1 ).cpu().numpy() df_sub.to_csv('submission.csv', index=False )
Digit Recognizer
14,079,394
<data_type_conversions><EOS>
!rm *.whl !rm *.py !rm *.ckpt !ls
Digit Recognizer
14,110,957
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_variables>
import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt from keras.models import Sequential from keras.layers import Conv2D, MaxPool2D, Activation, Dropout, Flatten, Dense, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from keras.optimizers import Adam
Digit Recognizer
14,110,957
val_preds = val_preds > 0.5<data_type_conversions>
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") print(df.shape) print(test.shape) print(df.head() )
Digit Recognizer
14,110,957
full_preds = val_preds_all[0].numpy()<compute_test_metric>
labels = df["label"] X = df.drop('label', axis = 1) print(labels.value_counts()) print("Baseline Accuracy: " + str(round(labels.value_counts().max() /labels.value_counts().sum() ,3)) )
Digit Recognizer
14,110,957
vis_arr = cm(val_targ, val_preds )<filter>
def normalizeANDreshape(df, minimum, maximum): diff = maximum - minimum zero_min = df - minimum adjusted = zero_min/diff shaped = adjusted.values.reshape(-1,28,28,1) return shaped print(np.max(normalizeANDreshape(X, 0, 255))) print(np.min(normalizeANDreshape(X, 0, 255))) print(type(normalizeANDreshape(X, 0, 255))) print(normalizeANDreshape(X, 0, 255 ).shape )
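A small worked example of the scaling above: min-max maps a pixel of 127 to 127/255 ≈ 0.498, and the flat (n, 784) frame becomes an (n, 28, 28, 1) tensor for Conv2D. The toy row is hypothetical:

import numpy as np
import pandas as pd

toy = pd.DataFrame(np.array([[0, 127, 255] + [0] * 781]))   # one fake 784-pixel image
out = normalizeANDreshape(toy, 0, 255)
print(out.shape, out.max())   # (1, 28, 28, 1) 1.0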
Digit Recognizer
14,110,957
val = dfs[dfs.is_valid==True] len(val[val['16'] == 1] )<compute_test_metric>
y = pd.get_dummies(labels) y.head()
Digit Recognizer
14,110,957
average_precision = average_precision_score(val_targ, val_preds) average_precision<find_best_params>
X_train, X_val, y_train, y_val = train_test_split(normalizeANDreshape(X, 0, 255), y, test_size = 0.20 )
Digit Recognizer
14,110,957
precision = dict() recall = dict() average_precision = dict() for i in range(19): precision[i], recall[i], _ = precision_recall_curve(val_targ[:, i], val_preds[:, i]) average_precision[i] = average_precision_score(val_targ[:, i], val_preds[:, i]) precision["micro"], recall["micro"], _ = precision_recall_curve(val_targ.ravel() , val_preds.ravel()) average_precision["micro"] = average_precision_score(val_targ, val_preds, average="micro") print('Average precision score, micro-averaged over all classes: {0:0.2f}'.format(average_precision["micro"]))<define_variables>
augment = ImageDataGenerator(rotation_range = 15, width_shift_range = 0.35, height_shift_range = 0.35, zoom_range = 0.2, ) augment.fit(X_train )
Digit Recognizer
14,110,957
path = Path('../input/hpa-cell-tiles-test-with-enc-dataset')
def makeCNN(shape): model = Sequential() model.add(Conv2D(filters = 64, kernel_size =(4,4), padding = 'same', activation = 'relu', input_shape = shape)) model.add(MaxPool2D(pool_size=(2,2))) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'same', activation = 'relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'same', activation = 'relu')) model.add(Conv2D(filters = 128, kernel_size =(2,2), padding = 'same', activation = 'relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Flatten()) model.add(Dense(64, activation = 'relu')) model.add(Dropout(0.25)) model.add(Dense(10, activation = 'softmax')) return model model = makeCNN(( 28, 28, 1)) model.summary()
Digit Recognizer
14,110,957
df = pd.read_csv(path/'cell_df.csv' )<save_to_csv>
model.compile(optimizer = Adam() , loss = 'categorical_crossentropy', metrics = 'accuracy' )
Digit Recognizer
14,110,957
df.to_csv('cell_df.csv', index=False )<train_model>
epochs = 50 batch_size = 64 history = model.fit_generator(augment.flow(X_train,y_train, batch_size=batch_size), epochs = epochs, steps_per_epoch=len(X_train)// batch_size, validation_data =(X_val,y_val), verbose = 1, use_multiprocessing = True, workers = 2 )
Digit Recognizer
14,110,957
test_dl = learn.dls.test_dl(df )<define_variables>
validation_predictions = model.predict_classes(X_val) confusion = confusion_matrix(y_val.idxmax(axis=1), validation_predictions) print(confusion)
Digit Recognizer
14,110,957
test_dl.show_batch()<predict_on_test>
test = normalizeANDreshape(test, 0, 255) predictions = model.predict_classes(test) print(predictions[0:5] )
Digit Recognizer
14,110,957
preds, _ = learn.get_preds(dl=test_dl )<load_pretrained>
Id = [] for i in range(len(test)) : Id.append(i+1) output = pd.DataFrame({'ImageId': Id, 'Label': predictions}) output.to_csv('predictions.csv', index=False)
Digit Recognizer
11,475,252
with open('preds.pickle', 'wb')as handle: pickle.dump(preds, handle )<feature_engineering>
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
Digit Recognizer
11,475,252
tta, _ = learn.tta(dl=test_dl )<save_to_csv>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
11,475,252
with open('tta.pickle', 'wb')as handle: pickle.dump(tta, handle )<prepare_output>
y = train["label"] y = tf.keras.utils.to_categorical(y, num_classes=10) image_id = list(test.index) image_id = [i+1 for i in image_id] train = train.drop("label", axis=1) train = train.values.reshape(-1, 28, 28, 1) test = test.values.reshape(-1, 28, 28, 1 )
Digit Recognizer
11,475,252
cls_prds = torch.argmax(preds, dim=-1) len(cls_prds), cls_prds<load_from_csv>
import os
import math
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPool2D, BatchNormalization
from keras.callbacks import ModelCheckpoint, EarlyStopping, LambdaCallback
Digit Recognizer
11,475,252
sample_submission = pd.read_csv('../input/hpa-single-cell-image-classification/sample_submission.csv')
sample_submission.head()<feature_engineering>
# 60/20/20 split: carve out the test set first, then a validation set
# (0.25 of the remaining 80% is 20% of the full data).
xtrain, xtest, ytrain, ytest = train_test_split(train, y, test_size=0.2)
xtrain, xval, ytrain, yval = train_test_split(xtrain, ytrain, test_size=0.25)
Digit Recognizer
11,475,252
df['cls'] = cls_prds
# Build "<class> <confidence> <rle>" triplets per cell, with confidence fixed at 1.
df['pred'] = df[['cls', 'enc']].apply(lambda r: str(r[0]) + ' 1 ' + r[1], axis=1)
df.head()<groupby>
# Imports needed by this class (not shown in the original cell).
import math
from keras import backend as K
from keras.callbacks import LambdaCallback

class LRFinder:
    def __init__(self, model):
        self.model = model
        self.losses = []
        self.lrs = []
        self.best_loss = 1e9

    def on_batch_end(self, batch, logs):
        # Record the current learning rate and loss for this batch.
        lr = K.get_value(self.model.optimizer.lr)
        self.lrs.append(lr)
        loss = logs['loss']
        self.losses.append(loss)
        # Stop once the loss diverges well past the best value seen so far.
        if math.isnan(loss) or loss > self.best_loss * 4:
            self.model.stop_training = True
            return
        if loss < self.best_loss:
            self.best_loss = loss
        # Exponentially increase the learning rate for the next batch.
        lr *= self.lr_mult
        K.set_value(self.model.optimizer.lr, lr)

    def find(self, x_train, y_train, start_lr, end_lr, batch_size=64, epochs=1):
        num_batches = epochs * x_train.shape[0] / batch_size
        self.lr_mult = (end_lr / start_lr) ** (1 / num_batches)
        # Save weights so the sweep does not disturb the real training run.
        self.model.save_weights('tmp.h5')
        original_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, start_lr)
        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))
        self.model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[callback])
        self.model.load_weights('tmp.h5')
        K.set_value(self.model.optimizer.lr, original_lr)

    def plot_loss(self, n_skip_beginning=10, n_skip_end=5):
        plt.ylabel("loss")
        plt.xlabel("learning rate (log scale)")
        plt.plot(self.lrs[n_skip_beginning:-n_skip_end], self.losses[n_skip_beginning:-n_skip_end])
        plt.xscale('log')

    def plot_loss_change(self, sma=1, n_skip_beginning=10, n_skip_end=5, y_lim=(-0.01, 0.01)):
        assert sma >= 1
        # Approximate the derivative of the loss with a simple moving difference.
        derivatives = [0] * sma
        for i in range(sma, len(self.lrs)):
            derivative = (self.losses[i] - self.losses[i - sma]) / sma
            derivatives.append(derivative)
        plt.ylabel("rate of loss change")
        plt.xlabel("learning rate (log scale)")
        plt.plot(self.lrs[n_skip_beginning:-n_skip_end], derivatives[n_skip_beginning:-n_skip_end])
        plt.xscale('log')
        plt.ylim(y_lim)
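A minimal usage sketch for the finder above; the model name cnn and the learning-rate bounds here are illustrative assumptions, not values from the original kernel:

# Sweep the learning rate across one epoch, then inspect where the loss
# begins to diverge and pick a rate slightly below that point.
lr_finder = LRFinder(cnn)  # cnn: any compiled Keras model (hypothetical name)
lr_finder.find(xtrain, ytrain, start_lr=1e-6, end_lr=1.0, batch_size=64, epochs=1)
lr_finder.plot_loss()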
Digit Recognizer
11,475,252
subm = df.groupby(['image_id'])['pred'].apply(lambda x: ' '.join(x)).reset_index()
subm.head()<merge>
def model():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, kernel_size=3, activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Conv2D(128, kernel_size=4, activation='relu'))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dropout(0.4))
    model.add(Dense(10, activation='softmax'))
    adam = tf.keras.optimizers.Adam()
    model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
    # Note: these callbacks are created here but never returned or used;
    # pass them to fit() explicitly if early stopping/checkpointing is wanted.
    early = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)
    checkpoint_path = 'training_1/cp.ckpt'
    checkpoint_dir = os.path.dirname(checkpoint_path)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)
    return model
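A minimal usage sketch for the builder above, assuming the xtrain/xval/xtest splits created earlier in this kernel; the epoch count and batch size are illustrative choices, not values from the original:

# Build, train, and evaluate the network defined above.
cnn = model()
history = cnn.fit(xtrain, ytrain,
                  validation_data=(xval, yval),
                  epochs=30, batch_size=64)
test_loss, test_acc = cnn.evaluate(xtest, ytest)
print("held-out accuracy: {:.4f}".format(test_acc))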
Digit Recognizer