kernel_id
int64 24.2k
23.3M
| prompt
stringlengths 8
1.85M
| completetion
stringlengths 1
182k
| comp_name
stringlengths 5
57
|
---|---|---|---|
13,330,495 |
from keras.applications.nasnet import NASNetLarge
from keras.applications.inception_resnet_v2 import InceptionResNetV2<choose_model_class>
|
image_gen_val = ImageDataGenerator(rescale=1./255)
val_data_gen = image_gen_val.flow(X_val, y_val, batch_size=BATCH_SIZE )
|
Digit Recognizer
|
13,330,495 |
base_model = InceptionResNetV2(include_top=False,
weights='imagenet',
input_shape=(331,331,3)
)
base_model.trainable = False<choose_model_class>
|
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,(3,3), activation='relu', input_shape=(28, 28, 1)) ,
tf.keras.layers.BatchNormalization(axis=1),
tf.keras.layers.Conv2D(32,(3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64,(3,3), activation='relu'),
tf.keras.layers.BatchNormalization(axis=1),
tf.keras.layers.Conv2D(64,(3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
] )
|
Digit Recognizer
|
13,330,495 |
model = Sequential([
base_model,
GlobalAveragePooling2D() ,
Dense(256,activation = 'relu'),
Dropout(0.5),
Dense(120,activation='softmax')
])
model.summary()<choose_model_class>
|
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy() ,
metrics=['accuracy'] )
|
Digit Recognizer
|
13,330,495 |
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'] )<choose_model_class>
|
epochs=20
history = model.fit_generator(
train_data_gen,
steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))),
epochs=epochs,
validation_data=val_data_gen,
validation_steps=int(np.ceil(total_val / float(BATCH_SIZE)))
)
|
Digit Recognizer
|
13,330,495 |
my_calls = [keras.callbacks.EarlyStopping(monitor='val_accuracy',patience=2),
keras.callbacks.ModelCheckpoint("Model.h5",verbose=1,save_best_only=True)]<train_model>
|
epochs=17
history = model.fit_generator(
full_data_gen,
steps_per_epoch=int(np.ceil(total_full / float(BATCH_SIZE))),
epochs=epochs
)
|
Digit Recognizer
|
13,330,495 |
<load_pretrained><EOS>
|
predictions = model.predict_classes(X_test, verbose=0)
submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,
"Label": predictions})
submissions.to_csv("DigitsClassif.csv", index=False, header=True )
|
Digit Recognizer
|
13,255,052 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<choose_model_class>
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, StratifiedKFold
import os
import math
|
Digit Recognizer
|
13,255,052 |
testgen = Imgen(preprocessing_function=keras.applications.inception_resnet_v2.preprocess_input )<prepare_x_and_y>
|
class LN5(nn.Module):
def __init__(self):
super(LN5, self ).__init__()
self.lc = nn.Sequential(
nn.Conv2d(1, 6, 5, padding=2),
nn.BatchNorm2d(6),
nn.ReLU(inplace=True),
nn.AvgPool2d(2, 2),
nn.Conv2d(6, 16, 5),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.AvgPool2d(2, 2),
)
self.ll = nn.Sequential(
nn.Dropout(0.3),
nn.Linear(400, 120),
nn.BatchNorm1d(120),
nn.ReLU(inplace=True),
nn.Dropout(0.3),
nn.Linear(120, 84),
nn.BatchNorm1d(84),
nn.ReLU(inplace=True),
nn.Dropout(0.3),
nn.Linear(84, 10),
)
def forward(self, x):
x = self.lc(x)
x = x.view(x.size(0), -1)
x = self.ll(x)
return x
|
Digit Recognizer
|
13,255,052 |
test_ds = testgen.flow_from_dataframe(
sample_sub,
directory = '.. /input/dog-breed-identification/test',
x_col = 'id',
y_col = None,
target_size =(331,331),
class_mode= None,
batch_size=32,
shuffle=False
)<predict_on_test>
|
def train_model(tt_loader, conv_model, optimizer, scheduler, criterion):
conv_model.train()
for batch_idx,(data, target)in enumerate(tt_loader):
data = data.unsqueeze(1)
data, target = data, target
optimizer.zero_grad()
output = conv_model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
scheduler.step()
def evaluate(num_epoch, data_loader, conv_model):
conv_model.eval()
loss = 0
correct = 0
for data, target in data_loader:
data = data.unsqueeze(1)
data, target = data, target
output = conv_model(data)
loss += F.cross_entropy(output, target, reduction='sum' ).item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
loss /= len(data_loader.dataset)
print('{} Average Val Loss: {:.4f}, Val Accuracy: {}/{}({:.3f}%)'.format(
num_epoch, loss, correct, len(data_loader.dataset),
100.* correct / len(data_loader.dataset)))
return loss
|
Digit Recognizer
|
13,255,052 |
predictions = model.predict(test_ds,verbose=1 )<prepare_output>
|
def kfold(num_model, num_epochs, conv, train_images, train_labels):
kf = StratifiedKFold(n_splits=num_model, shuffle=True, random_state=123)
criterion = nn.CrossEntropyLoss()
for k,(tr_idx, val_idx)in enumerate(kf.split(train_images, train_labels)) :
print('start model {}'.format(k))
conv_model = LN5()
optimizer = optim.Adam(params=conv_model.parameters() , lr=0.005)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)
tt_images = train_images[tr_idx]
tt_labels = train_labels[tr_idx]
val_images = train_images[val_idx]
val_labels = train_labels[val_idx]
tt_images = tt_images.reshape(tt_images.shape[0], 28, 28)
tt_images_tensor = torch.tensor(tt_images)/255.0
tt_labels_tensor = torch.tensor(tt_labels)
tt_tensor = TensorDataset(tt_images_tensor, tt_labels_tensor)
tt_loader = DataLoader(tt_tensor, batch_size=420, shuffle=True)
val_images = val_images.reshape(val_images.shape[0], 28, 28)
val_images_tensor = torch.tensor(val_images)/255.0
val_labels_tensor = torch.tensor(val_labels)
val_tensor = TensorDataset(val_images_tensor, val_labels_tensor)
val_loader = DataLoader(val_tensor, batch_size=420, shuffle=True)
for n in range(num_epochs):
train_model(tt_loader, conv_model, optimizer, scheduler, criterion)
evaluate(n, val_loader, conv_model)
torch.save(conv_model.state_dict() , '.. /input/conv-10-100/conv_{}_{}_{}.pkl'.format(num_model,num_epochs,k))
conv.append(conv_model )
|
Digit Recognizer
|
13,255,052 |
pred = [np.argmax(i)for i in predictions]<define_variables>
|
def load_pkl(num_model, num_epochs, conv):
for k in range(num_model):
conv_model = LN5()
conv_model.load_state_dict(torch.load('.. /input/conv-10-100/conv_{}_{}_{}.pkl'.format(num_model,num_epochs,k)))
conv.append(conv_model)
def total_loss(num_model, conv, train_images, train_labels):
train_images = train_images.reshape(train_images.shape[0], 28, 28)
train_images_tensor = torch.tensor(train_images)/255.0
train_labels_tensor = torch.tensor(train_labels)
train_tensor = TensorDataset(train_images_tensor, train_labels_tensor)
train_loader = DataLoader(train_tensor, batch_size=420, shuffle=True)
loss = []
for k in range(num_model):
loss.append(evaluate(-1, train_loader, conv[k]))
return loss
def predictions(num_model, conv, loss):
for idx in range(num_model):
conv[idx].eval()
test_df = pd.read_csv(".. /input/digit-recognizer/test.csv")
test_images =(test_df.iloc[:,:].values ).astype('float32')
test_images = test_images.reshape(test_images.shape[0], 28, 28)
test_images_tensor = torch.tensor(test_images)/255.0
test_loader = DataLoader(test_images_tensor, batch_size=280, shuffle=False)
test_preds = torch.LongTensor()
for i, data in enumerate(test_loader):
data = data.unsqueeze(1)
output = conv[0](data)*(0.1-loss[0])
for idx in range(1, num_model):
output = output + conv[idx](data)*(0.02-loss[idx])
preds = output.cpu().data.max(1, keepdim=True)[1]
test_preds = torch.cat(( test_preds, preds), dim=0)
submission_df = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv")
submission_df['Label'] = test_preds.numpy().squeeze()
submission_df.head()
submission_df.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
13,255,052 |
file_list = test_ds.filenames
id_list = []
for name in file_list:
m = re.sub('test/', '', name)
m = re.sub('.jpg', '', m)
id_list.append(m )<load_from_csv>
|
train_df = pd.read_csv(".. /input/digit-recognizer/train.csv")
train_labels = train_df['label'].values
train_images =(train_df.iloc[:,1:].values ).astype('float32')
num_model = 10
num_epochs = 100
conv = []
|
Digit Recognizer
|
13,255,052 |
submission = pd.read_csv('.. /input/dog-breed-identification/sample_submission.csv' )<prepare_output>
|
load_pkl(num_model, num_epochs, conv)
loss = total_loss(num_model, conv, train_images, train_labels)
predictions(num_model, conv, loss )
|
Digit Recognizer
|
7,484,184 |
submission['id'] = id_list
submission.iloc[:,1:] =predictions
submission.head()<save_to_csv>
|
train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
X_train = train_df.iloc[:, 1:].values.astype('float32')
y_train = train_df.iloc[:, 0].values.astype('int16')
X_test = test_df.values.astype('float32' )
|
Digit Recognizer
|
7,484,184 |
final_df = submission.set_index('id')
final_df.to_csv('Submission.csv' )<set_options>
|
X_train = X_train / 255
X_test = X_test / 255
y_train = to_categorical(y_train)
y_train.shape
|
Digit Recognizer
|
7,484,184 |
%%time
%matplotlib inline<define_variables>
|
seed = 10
np.random.seed(seed )
|
Digit Recognizer
|
7,484,184 |
%%time
train_dir = '.. /input/dog-breed-identification/train'
test_dir ='.. /input/dog-breed-identification/test'<load_from_csv>
|
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, BatchNormalization, Dense, Flatten, Dropout
|
Digit Recognizer
|
7,484,184 |
%%time
def append_ext(fn):
return fn+".jpg"
traindf = pd.read_csv('.. /input/dog-breed-identification/labels.csv',dtype=str)
testdf = pd.read_csv('.. /input/dog-breed-identification/sample_submission.csv',dtype=str)
traindf["id"] = traindf["id"].apply(append_ext)
testdf["id"] = testdf["id"].apply(append_ext)
<define_variables>
|
train_x, val_x, train_y, val_y = train_test_split(X_train, y_train, test_size = 0.10, random_state = 42, stratify = y_train )
|
Digit Recognizer
|
7,484,184 |
%%time
train_datagen=ImageDataGenerator(rescale=1./255.,
horizontal_flip = True,
validation_split=0.02
)<define_variables>
|
model = Sequential()
model.add(Convolution2D(32,(5, 5), activation = 'relu', padding = 'same', input_shape =(28, 28, 1)))
model.add(MaxPooling2D(pool_size =(2, 2)))
model.add(Convolution2D(64,(3, 3), activation = 'relu', padding = 'same'))
model.add(MaxPooling2D(pool_size =(2, 2)))
model.add(Dropout(0.3))
model.add(Convolution2D(128,(3, 3), activation = 'relu', padding = 'same'))
model.add(MaxPooling2D(pool_size =(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(256,(3, 3), activation = 'relu', padding = 'same'))
model.add(MaxPooling2D(pool_size =(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(Dropout(0.25))
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation = 'softmax'))
model.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy'] )
|
Digit Recognizer
|
7,484,184 |
BATCH_SIZE = 32<define_search_space>
|
history = model.fit(train_x, train_y, epochs = 15, batch_size = 64, validation_data =(val_x, val_y))
|
Digit Recognizer
|
7,484,184 |
<prepare_x_and_y><EOS>
|
predictions = model.predict_classes(X_test, verbose = 0)
submissions = pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,
"Label": predictions})
submissions.to_csv("digit.csv", index = False, header = True )
|
Digit Recognizer
|
13,182,620 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<prepare_x_and_y>
|
submit = True
|
Digit Recognizer
|
13,182,620 |
x,y = next(train_generator )<create_dataframe>
|
( x_train, y_train),(x_test, y_test)= tf.keras.datasets.mnist.load_data()
x_train_norm = x_train/255.
x_test_norm = x_test/255 .
|
Digit Recognizer
|
13,182,620 |
valid_generator=train_datagen.flow_from_dataframe(
dataframe=traindf,
directory=train_dir,
x_col="id",
y_col="breed",
subset="validation",
batch_size=BATCH_SIZE,
seed=42,
shuffle=True,
class_mode="categorical",
target_size=image_size,
color_mode="rgb")
<prepare_x_and_y>
|
kaggle = pd.read_csv('.. /input/digit-recognizer/test.csv')
kaggle_norm = np.asarray(kaggle/255.)
k_train = pd.read_csv('.. /input/digit-recognizer/train.csv')
k_labels = np.asarray(k_train['label'])
k_train = k_train.drop(columns=['label'])
k_train_norm = np.asarray(k_train/255.)
|
Digit Recognizer
|
13,182,620 |
test_datagen=ImageDataGenerator(rescale=1./255.)
test_generator=test_datagen.flow_from_dataframe(
dataframe=testdf,
directory=test_dir,
x_col="id",
y_col=None,
batch_size=BATCH_SIZE,
seed=42,
shuffle=False,
class_mode=None,
target_size=image_size,
color_mode="rgb" )<choose_model_class>
|
k_train_norm = k_train_norm.reshape(42000, 28, 28, 1)
kaggle_norm = kaggle_norm.reshape(28000, 28, 28, 1)
x_train_norm = x_train_norm.reshape(60000, 28, 28, 1)
x_test_norm = x_test_norm.reshape(10000, 28, 28, 1 )
|
Digit Recognizer
|
13,182,620 |
pretrained_model = tf.keras.applications.InceptionV3(
weights='imagenet',
include_top=False ,
input_shape=shape
)
pretrained_model.trainable = False
model = tf.keras.Sequential([
pretrained_model,
tf.keras.layers.GlobalAveragePooling2D() ,
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(120, activation='softmax')
] )<choose_model_class>
|
def train_model(model, train_features, train_label, epochs,
batch_size=None, validation_split=None):
history = model.fit(x=train_features, y=train_label,
batch_size=batch_size,
epochs=epochs, shuffle=True,
validation_split=validation_split,
verbose = 1)
epochs = history.epoch
hist = pd.DataFrame(history.history)
return epochs, hist
|
Digit Recognizer
|
13,182,620 |
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
opt=tf.keras.optimizers.SGD(lr=1e-3, momentum=0.9)
model.compile(optimizer = opt ,
loss="categorical_crossentropy",
metrics=["accuracy"])
model.summary()<choose_model_class>
|
def create_X(learning_rate):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, 4, activation='relu', input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPooling2D(( 2,2)))
model.add(tf.keras.layers.Conv2D(64, 2, activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(( 2,2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=300, activation='relu'))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Dense(units=100, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
loss="sparse_categorical_crossentropy",
metrics=['accuracy'])
return model
|
Digit Recognizer
|
13,182,620 |
reduce = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',factor=0.2,patience=5, min_lr=0.001)
early = tf.keras.callbacks.EarlyStopping(patience=2,
min_delta=0.001,
restore_best_weights=True )<train_model>
|
def getKaggles() :
kaggles = pd.DataFrame(columns=['ImageId','Label'])
predicts = convoluterX.predict(kaggle_norm)
for j in range(len(kaggle_norm)) :
probs = predicts[j]
max_id = np.argmax(probs)
kaggles.at[j,'ImageId'] = j+1
kaggles.at[j,'Label'] = max_id
return kaggles
kaggles = getKaggles()
kaggles.to_csv('submission.csv', columns=["ImageId","Label"], index=False )
|
Digit Recognizer
|
13,020,289 |
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
history = model.fit(train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=10,
callbacks=[early], )<compute_test_metric>
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Input
import matplotlib.pyplot as plt
|
Digit Recognizer
|
13,020,289 |
score = model.evaluate(valid_generator,batch_size=32)
print("Accuracy: {:.2f}%".format(score[1] * 100))
print("Loss: ",score[0])
<import_modules>
|
data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
data_sub = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
X = data.iloc[:, 1:]
y = data.iloc[:, 0]
|
Digit Recognizer
|
13,020,289 |
from sklearn.metrics import f1_score<predict_on_test>
|
X_train, X_test = X[:40000]/255.0, X[40000:]/255.0
y_train, y_test = y[:40000], y[40000:]
X_valid, y_valid = X_train[:10000], y_train[:10000]
|
Digit Recognizer
|
13,020,289 |
Y_pred = model.predict(valid_generator)
y_pred = np.argmax(Y_pred, axis=1 )<compute_test_metric>
|
model = tf.keras.models.Sequential([
Input(shape=X_train.shape[1:]),
Dense(256, activation='sigmoid'),
Dense(128, activation='sigmoid'),
Dense(10, activation='softmax'),
])
model.compile(optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=20,
validation_data=(X_valid, y_valid))
|
Digit Recognizer
|
13,020,289 |
f_score = f1_score(valid_generator.classes,y_pred,average='macro')
print('F1 score:',f_score)
<predict_on_test>
|
model.evaluate(X_test, y_test )
|
Digit Recognizer
|
13,020,289 |
pred=model.predict(test_generator )<load_from_csv>
|
model = tf.keras.Sequential([
Input(shape=X_train.shape[1:]),
Dense(256, activation='relu'),
Dense(128, activation='relu'),
Dense(10, activation='softmax'),
])
model.compile(optimizer='RMSProp',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=20,
validation_data=(X_valid, y_valid))
|
Digit Recognizer
|
13,020,289 |
df_submission = pd.read_csv('/kaggle/input/dog-breed-identification/sample_submission.csv')
df_submission.head()<define_variables>
|
print('Accuracy {}'.format(np.round(model.evaluate(X_test, y_test)[1], 4)) )
|
Digit Recognizer
|
13,020,289 |
file_list = test_generator.filenames
id_list = []
for name in file_list:
m = re.sub('test/', '', name)
m = re.sub('.jpg', '', m)
id_list.append(m )<prepare_output>
|
model = tf.keras.Sequential([
Input(shape=X_train.shape[1:]),
Dense(256, activation='relu'),
BatchNormalization() ,
Dropout(0.1),
Dense(128, activation='relu'),
BatchNormalization() ,
Dropout(0.45),
Dense(10, activation='softmax'),
])
model.compile(optimizer='RMSProp',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=20,
validation_data=(X_valid, y_valid))
|
Digit Recognizer
|
13,020,289 |
df_submission['id'] = id_list
df_submission.iloc[:,1:] = pred
df_submission.head()<define_variables>
|
print('Accuracy {}'.format(np.round(model.evaluate(X_test, y_test)[1], 4)) )
|
Digit Recognizer
|
13,020,289 |
breeds=['id','beagle','chihuahua','doberman','french_bulldog', 'golden_retriever', 'malamute','pug','saint_bernard','scottish_deerhound','tibetan_mastiff']<filter>
|
X_train, X_valid, X_test = np.array(X_train ).reshape(-1, 28, 28, 1), np.array(X_valid ).reshape(-1, 28, 28, 1), np.array(X_test ).reshape(-1, 28, 28, 1 )
|
Digit Recognizer
|
13,020,289 |
selected_breeds = df_submission.loc[:,breeds]<save_to_csv>
|
model = tf.keras.models.Sequential([
Input(shape=X_train.shape[1:]),
Conv2D(32, 7, activation='relu', padding='same'),
Conv2D(32, 5, activation='relu', padding='same'),
MaxPooling2D(pool_size=(2,2)) ,
BatchNormalization() ,
Dropout(0.3),
Conv2D(64, 5, activation='relu', padding='same'),
Conv2D(64, 5, activation='relu', padding='same'),
MaxPooling2D(pool_size=(2,2)) ,
BatchNormalization() ,
Dropout(0.3),
Flatten() ,
Dense(256, activation='relu'),
BatchNormalization() ,
Dropout(0.5),
Dense(10, activation='softmax')
])
model.compile(optimizer='RMSProp',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(X_train, y_train,
epochs=20,
validation_data=(X_valid, y_valid))
|
Digit Recognizer
|
13,020,289 |
final_sub = df_submission.set_index('id')
final_sub.to_csv('Submission.csv' )<set_options>
|
print('Accuracy {}'.format(np.round(model.evaluate(X_test, y_test)[1], 4)) )
|
Digit Recognizer
|
13,020,289 |
%%time
%matplotlib inline<define_variables>
|
train_datagen = ImageDataGenerator(
rotation_range=15,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
validation_split = 0.25
)
valid_datagen = ImageDataGenerator(
rotation_range=15,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
validation_split = 0.25
)
train_datagen.fit(X_train)
valid_datagen.fit(X_valid)
train_generator = train_datagen.flow(X_train, y_train, batch_size=50, subset='training')
valid_generator = valid_datagen.flow(X_valid, y_valid, batch_size=50, subset='validation' )
|
Digit Recognizer
|
13,020,289 |
%%time
train_dir = '.. /input/dog-breed-identification/train'
test_dir ='.. /input/dog-breed-identification/test'<load_from_csv>
|
history = model.fit_generator(generator=train_generator,
validation_data=valid_generator,
epochs = 20 )
|
Digit Recognizer
|
13,020,289 |
%%time
def append_ext(fn):
return fn+".jpg"
traindf = pd.read_csv('.. /input/dog-breed-identification/labels.csv',dtype=str)
testdf = pd.read_csv('.. /input/dog-breed-identification/sample_submission.csv',dtype=str)
traindf["id"] = traindf["id"].apply(append_ext)
testdf["id"] = testdf["id"].apply(append_ext)
<define_variables>
|
print('Accuracy {}'.format(np.round(model.evaluate(X_test, y_test)[1], 4)) )
|
Digit Recognizer
|
13,020,289 |
%%time
train_datagen=ImageDataGenerator(rescale=1./255.,
zoom_range = [0.7,1],
horizontal_flip = True,
validation_split=0.05
)<define_variables>
|
history = model.fit_generator(generator=train_generator,
validation_data=valid_generator,
epochs = 200,
callbacks=[checkpoint, early_stopping] )
|
Digit Recognizer
|
13,020,289 |
BATCH_SIZE = 32<prepare_x_and_y>
|
model = tf.keras.models.load_model('model.h5')
model.evaluate(X_test, y_test )
|
Digit Recognizer
|
13,020,289 |
train_generator=train_datagen.flow_from_dataframe(
dataframe=traindf,
directory=train_dir,
x_col="id",
y_col="breed",
subset="training",
batch_size=BATCH_SIZE,
seed=42,
shuffle=True,
class_mode="categorical",
target_size=(224,224),
color_mode="rgb"
)<prepare_x_and_y>
|
data_sub = np.array(data_sub ).reshape(-1, 28, 28 , 1 ).astype('float32')/ 255
|
Digit Recognizer
|
13,020,289 |
x,y = next(train_generator )<create_dataframe>
|
preds = model.predict(data_sub )
|
Digit Recognizer
|
13,020,289 |
valid_generator=train_datagen.flow_from_dataframe(
dataframe=traindf,
directory=train_dir,
x_col="id",
y_col="breed",
subset="validation",
batch_size=BATCH_SIZE,
seed=42,
shuffle=True,
class_mode="categorical",
target_size=(224,224),
color_mode="rgb")
<prepare_x_and_y>
|
np.argmax(preds[0] )
|
Digit Recognizer
|
13,020,289 |
test_datagen=ImageDataGenerator(rescale=1./255.)
test_generator=test_datagen.flow_from_dataframe(
dataframe=testdf,
directory=test_dir,
x_col="id",
y_col=None,
batch_size=BATCH_SIZE,
seed=42,
shuffle=False,
class_mode=None,
target_size=(224,224),
color_mode="rgb" )<choose_model_class>
|
labels = [np.argmax(x)for x in preds]
ids = [x+1 for x in range(len(preds)) ]
sub = pd.DataFrame()
|
Digit Recognizer
|
13,020,289 |
pretrained_model = tf.keras.applications.ResNet50V2(
weights='imagenet',
include_top=False ,
input_shape=[224,224,3]
)
pretrained_model.trainable = False
model = tf.keras.Sequential([
pretrained_model,
tf.keras.layers.GlobalAveragePooling2D() ,
tf.keras.layers.Dense(120, activation='softmax')
] )<choose_model_class>
|
sub['ImageId'] = ids
sub['Label'] = labels
sub.to_csv('mnist_submission.csv', index=False)
pd.read_csv('mnist_submission.csv' )
|
Digit Recognizer
|
13,009,821 |
opt=tf.keras.optimizers.SGD(lr=1e-4, momentum=0.9)
model.compile(optimizer = opt ,
loss="categorical_crossentropy",
metrics=["accuracy"])
model.summary()<choose_model_class>
|
train = pd.read_csv('.. /input/digit-recognizer/train.csv')
test = pd.read_csv('.. /input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
13,009,821 |
early = tf.keras.callbacks.EarlyStopping(patience=2,
min_delta=0.001,
restore_best_weights=True )<train_model>
|
X = train.drop("label",axis=1 ).values.reshape(-1,28,28,1)
y = train["label"].values
X_test = test.values.reshape(-1,28,28,1 )
|
Digit Recognizer
|
13,009,821 |
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
history = model.fit(train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=25,
callbacks=[early], )<compute_test_metric>
|
X = X.astype('float32')
X_test = X_test.astype('float32')
X = X/255.0
X_test = X_test/255.0
y = tensorflow.keras.utils.to_categorical(y,10 )
|
Digit Recognizer
|
13,009,821 |
score = model.evaluate(valid_generator,batch_size=32)
print("Accuracy: {:.2f}%".format(score[1] * 100))
print("Loss: ",score[0])
<import_modules>
|
X_train,X_val,y_train,y_val = train_test_split(X,y,test_size=0.2,random_state=1 )
|
Digit Recognizer
|
13,009,821 |
from sklearn.metrics import f1_score, confusion_matrix<define_variables>
|
Digit Recognizer
|
|
13,009,821 |
target_names = []
for key in train_generator.class_indices:
target_names.append(key )<predict_on_test>
|
nn = 10
model = [0]*nn
for j in range(nn):
model[j] = Sequential()
model[j].add(Conv2D(24,kernel_size=(3,3),padding='same',activation='relu',input_shape=(28,28,1)))
model[j].add(BatchNormalization())
model[j].add(Conv2D(24,kernel_size=(3,3),padding='same',activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Dropout(0.4))
model[j].add(Conv2D(filters=64,kernel_size=(3,3),activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Conv2D(filters=64,kernel_size=(3,3),activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Dropout(0.4))
model[j].add(Conv2D(128,kernel_size=4,activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Flatten())
model[j].add(Dropout(0.4))
model[j].add(Dense(10,activation='softmax'))
model[j].compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
|
Digit Recognizer
|
13,009,821 |
Y_pred = model.predict(valid_generator)
y_pred = np.argmax(Y_pred, axis=1 )<compute_test_metric>
|
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1
)
datagen.fit(X )
|
Digit Recognizer
|
13,009,821 |
cm = confusion_matrix(valid_generator.classes,y_pred)
<compute_test_metric>
|
epochs = 50
history = [0]*nn
for j in range(nn):
X_train,X_val,y_train,y_val = train_test_split(X,y,test_size=0.1,random_state=7)
history[j] = model[j].fit_generator(datagen.flow(X_train,y_train,batch_size=64),
epochs=epochs,validation_data=(X_val,y_val),
verbose=1)
|
Digit Recognizer
|
13,009,821 |
f_score = f1_score(valid_generator.classes,y_pred,average='macro')
print('F1 score:',f_score)
<predict_on_test>
|
results = np.zeros(( X_test.shape[0],10))
for j in range(nn):
results = results+model[j].predict(X_test)
results = np.argmax(results,axis=1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name='ImageId'),results],axis=1)
submission.to_csv('ENSEMBLE.csv',index=False )
|
Digit Recognizer
|
13,009,821 |
pred=model.predict(test_generator )<load_from_csv>
|
model.fit(datagen.flow(X_train,y_train,batch_size=128),epochs=10,validation_data=(X_val,y_val))
|
Digit Recognizer
|
13,009,821 |
df_submission = pd.read_csv('/kaggle/input/dog-breed-identification/sample_submission.csv')
df_submission.head()<define_variables>
|
model.fit(datagen.flow(X,y,batch_size=128),epochs=20,verbose=1 )
|
Digit Recognizer
|
13,009,821 |
file_list = test_generator.filenames
id_list = []
for name in file_list:
m = re.sub('test/', '', name)
m = re.sub('.jpg', '', m)
id_list.append(m )<prepare_output>
|
model_log = model.fit(X_train,y_train,batch_size=128,epochs=10,verbose=1,validation_data=(X_val,y_val))
|
Digit Recognizer
|
13,009,821 |
df_submission['id'] = id_list
df_submission.iloc[:,1:] = pred
df_submission.head()<define_variables>
|
model_log = model.fit(X,y,batch_size=128,epochs=20,verbose=1 )
|
Digit Recognizer
|
13,009,821 |
breeds=['id','beagle','chihuahua','doberman','french_bulldog', 'golden_retriever', 'malamute','pug','saint_bernard','scottish_deerhound','tibetan_mastiff']<filter>
|
X_train = X_train.reshape(X_train.shape[0],28,28,1)
X_test = X_test.reshape(X_test.shape[0],28,28,1)
X_train.astype('float32')
X_test.astype('float32')
X_train=X_train/255
X_val=X_val/255
|
Digit Recognizer
|
13,009,821 |
selected_breeds = df_submission.loc[:,breeds]<save_to_csv>
|
import numpy as np
import pandas as pd
|
Digit Recognizer
|
13,009,821 |
final_sub = df_submission.set_index('id')
final_sub.to_csv('Submission.csv' )<import_modules>
|
results = model.predict(X_test )
|
Digit Recognizer
|
13,009,821 |
from sklearn.preprocessing impbort LabelEncoder
<load_from_csv>
|
np.argmax(results,axis=1 )
|
Digit Recognizer
|
13,009,821 |
comp_df = pd.read_csv('.. /input/dog-breed-identification/labels.csv')
test_df = pd.read_csv('.. /input/dog-breed-identification/sample_submission.csv')
print('Training set: {}, Test set: {}'.format(comp_df.shape[0],test_df.shape[0]))<count_values>
|
results = np.argmax(results,axis=1)
results = pd.Series(results,name='Label')
submission = pd.concat([pd.Series(range(1,28001),name='ImageId'),results],axis=1)
submission.to_csv('submission8.csv',index=False )
|
Digit Recognizer
|
12,997,499 |
comp_df.breed.value_counts()<categorify>
|
%matplotlib inline
np.random.seed(2 )
|
Digit Recognizer
|
12,997,499 |
comp_df['label'] = LabelEncoder().fit_transform(comp_df.breed)
dict_df = comp_df[['label','breed']].copy()
dict_df.drop_duplicates(inplace=True)
dict_df.set_index('label',drop=True,inplace=True)
index_to_breed = dict_df.to_dict() ['breed']<feature_engineering>
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
|
Digit Recognizer
|
12,997,499 |
train_dir = '.. /input/dog-breed-identification/train'
comp_df.id = comp_df.id.apply(lambda x: x+'.jpg')
comp_df.id = comp_df.id.apply(lambda x:train_dir+'/'+x)
comp_df.pop('breed' )<prepare_x_and_y>
|
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
|
Digit Recognizer
|
12,997,499 |
class img_dataset(Dataset):
def __init__(self,dataframe,transform=None,test=False):
self.dataframe = dataframe
self.transform = transform
self.test = test
def __getitem__(self,index):
x = Image.open(self.dataframe.iloc[index,0])
if self.transform:
x = self.transform(x)
if self.test:
return x
else:
y = self.dataframe.iloc[index,1]
return x,y
def __len__(self):
return self.dataframe.shape[0]<define_search_model>
|
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
12,997,499 |
train_transformer = transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomRotation(15),
transforms.RandomHorizontalFlip() ,
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
val_transformer = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])] )<train_model>
|
X_train /= 255.0
test /= 255.0
|
Digit Recognizer
|
12,997,499 |
def print_epoch_result(train_loss, train_acc, val_loss, val_acc):
    """Print one epoch's train/validation loss and accuracy on a single line."""
    print(f'loss: {train_loss:.3f}, acc: {train_acc:.3f}, '
          f'val_loss: {val_loss:.3f}, val_acc: {val_acc:.3f}')
def train_model(model, cost_function, optimizer,num_epochs=5):
    """Train `model` for `num_epochs`, tracking per-epoch loss/accuracy for
    both splits.

    Returns (train_losses, train_acc, val_losses, val_acc), one entry per epoch.
    NOTE(review): relies on module-level `train_loader`, `val_loader`,
    `device` and the `metrics` accuracy object (torchmetrics? — confirm);
    the validation pass runs without torch.no_grad(), so gradients are
    tracked there too (wasteful but harmless).
    """
    train_losses = []
    val_losses = []
    train_acc = []
    val_acc = []
    # accumulating accuracy objects: updated per batch, read via .compute()
    train_acc_object = metrics.Accuracy(compute_on_step=False)
    val_acc_object = metrics.Accuracy(compute_on_step=False)
    for epoch in range(num_epochs):
        print('-'*15)
        print('Start training {}/{}'.format(epoch+1, num_epochs))
        print('-'*15)
        train_sub_losses = []
        model.train()
        for x,y in train_loader:
            optimizer.zero_grad()
            x,y = x.to(device),y.to(device)
            y_hat = model(x)
            loss = cost_function(y_hat,y)
            loss.backward()
            optimizer.step()
            train_sub_losses.append(loss.item())
            train_acc_object(y_hat.cpu() ,y.cpu())
        val_sub_losses = []
        model.eval()
        for x,y in val_loader:
            x,y = x.to(device),y.to(device)
            y_hat = model(x)
            loss = cost_function(y_hat,y)
            val_sub_losses.append(loss.item())
            val_acc_object(y_hat.cpu() ,y.cpu())
        # epoch bookkeeping: mean batch loss + accumulated accuracy, then reset
        train_losses.append(np.mean(train_sub_losses))
        val_losses.append(np.mean(val_sub_losses))
        train_epoch_acc = train_acc_object.compute()
        val_epoch_acc = val_acc_object.compute()
        train_acc.append(train_epoch_acc)
        val_acc.append(val_epoch_acc)
        train_acc_object.reset()
        val_acc_object.reset()
        print_epoch_result(np.mean(train_sub_losses),train_epoch_acc,np.mean(val_sub_losses),val_epoch_acc)
    print('Finish Training.')
    return train_losses, train_acc, val_losses, val_acc
|
Y_train = to_categorical(Y_train, num_classes = 10 )
|
Digit Recognizer
|
12,997,499 |
device = torch.device('cuda:0' if torch.cuda.is_available else 'cpu' )<split>
|
# Hold out 10% of the training data for validation (fixed seed for reproducibility).
random_seed = 2
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed )
|
Digit Recognizer
|
12,997,499 |
# Build train/val image datasets and loaders from the path/label dataframe.
# sample(n) with n == len(df) is just a full shuffle; the same frame is
# passed twice to train_test_split only to reuse its splitting logic
# (the y-halves are discarded).
training_samples = comp_df.shape[0]
test_size=0.05
batch_size = 64
sample_df = comp_df.sample(training_samples)
x_train,x_val,_,_ = train_test_split(sample_df,sample_df,test_size=test_size)
train_set = img_dataset(x_train, transform=train_transformer)
val_set = img_dataset(x_val, transform=val_transformer)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set , batch_size=batch_size, shuffle=True)
print('Training set: {}, Validation set: {}'.format(x_train.shape[0],x_val.shape[0]))
|
# Small Conv-Conv-Pool x2 CNN for 28x28x1 digits, ending in a 10-way softmax.
# NOTE(review): with metrics=["accuracy"], tf2 Keras logs 'val_accuracy', not
# 'val_acc' — if this runs on tf2 the `annealer` callback below would never
# fire; confirm the Keras version in use.
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Valid', activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
model.compile(loss='categorical_crossentropy', optimizer = Adam(lr=1e-3), metrics=["accuracy"])
# halve the learning rate when validation accuracy plateaus for 2 epochs
annealer = ReduceLROnPlateau(monitor='val_acc', patience=2, verbose=1, factor=0.5, min_lr=0.00001)
epochs = 50
batch_size = 64
|
Digit Recognizer
|
12,997,499 |
class net(torch.nn.Module):
    """Classification head stacked on a backbone: base -> ReLU -> 512 -> classes."""

    def __init__(self, base_model, base_out_features, num_classes):
        super().__init__()
        self.base_model = base_model
        self.linear1 = torch.nn.Linear(base_out_features, 512)
        self.output = torch.nn.Linear(512, num_classes)

    def forward(self, x):
        features = F.relu(self.base_model(x))
        hidden = F.relu(self.linear1(features))
        # raw logits; the loss applies softmax internally
        return self.output(hidden)
# Frozen ImageNet ResNet-50 backbone (downloads pretrained weights) with a
# trainable head for the 120 dog-breed classes.
res = torchvision.models.resnet50(pretrained=True)
for param in res.parameters() :
    param.requires_grad=False
model_final = net(base_model=res, base_out_features=res.fc.out_features, num_classes=120)
model_final = model_final.to(device )
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=2, verbose=1, factor=0.5, min_lr=0.00001 )
|
Digit Recognizer
|
12,997,499 |
# Cross-entropy loss; optimize only the head parameters (the backbone's
# requires_grad was set to False above).
cost_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam([param for param in model_final.parameters() if param.requires_grad], lr=0.0003)
EPOCHS = 30
|
# Light geometric augmentation for digit images: small rotations, zooms and
# shifts only; flips are explicitly disabled.
datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=10,
        zoom_range = 0.1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=False,
        vertical_flip=False )
|
Digit Recognizer
|
12,997,499 |
# Run the full training loop defined above; collects per-epoch curves.
train_losses, train_acc, val_losses, val_acc = train_model(model=model_final,
                                                           cost_function=cost_function,
                                                           optimizer=optimizer,
                                                           num_epochs=EPOCHS )
|
datagen.fit(X_train )
|
Digit Recognizer
|
12,997,499 |
# Build absolute test-image paths and an order-preserving loader
# (shuffle=False) so predictions stay aligned with test_df rows.
test_dir = '.. /input/dog-breed-identification/test'
test_df = test_df[['id']]
test_df.id = test_df.id.apply(lambda x: x+'.jpg')
test_df.id = test_df.id.apply(lambda x : test_dir+'/'+x)
test_set = img_dataset(test_df,transform=val_transformer, test=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False )
|
# Train with on-the-fly augmentation; validate on the held-out split each epoch.
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
                              epochs = epochs, validation_data =(X_val,Y_val),
                              verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
                              , callbacks=[learning_rate_reduction] )
|
Digit Recognizer
|
12,997,499 |
# Batched inference over the test loader, accumulating logits on the CPU.
# IMPROVEMENT: wrap the loop in torch.no_grad() — the original tracked
# gradients during inference, wasting memory/compute for identical outputs.
model_final.eval()
predictions = torch.tensor([])
print('Start predicting.... ')
with torch.no_grad():
    for x in test_loader:
        x = x.to(device)
        y_hat = model_final(x)
        predictions = torch.cat([predictions, y_hat.cpu()])
print('Finish prediction.')
|
# Class probabilities -> predicted digit per test row, as a named Series.
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
12,997,499 |
<save_to_csv><EOS>
|
# Pair 1-based ImageIds with the predicted labels and write the submission.
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("sub2.csv",index=False)
|
Digit Recognizer
|
12,983,127 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<install_modules>
|
%matplotlib inline
|
Digit Recognizer
|
12,983,127 |
print("
...INSTALLING AND IMPORTING CELL-PROFILER TOOL(HPACELLSEG )...
")
try:
except:
!pip install -q "/kaggle/input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl"
!pip install -q "/kaggle/input/hpapytorchzoozip/pytorch_zoo-master"
!pip install -q "/kaggle/input/hpacellsegmentatormaster/HPA-Cell-Segmentation-master"
print("
...OTHER IMPORTS STARTING...
")
print("
\tVERSION INFORMATION")
LBL_NAMES = ["Nucleoplasm", "Nuclear Membrane", "Nucleoli", "Nucleoli Fibrillar Center", "Nuclear Speckles", "Nuclear Bodies", "Endoplasmic Reticulum", "Golgi Apparatus", "Intermediate Filaments", "Actin Filaments", "Microtubules", "Mitotic Spindle", "Centrosome", "Plasma Membrane", "Mitochondria", "Aggresome", "Cytosol", "Vesicles", "Negative"]
INT_2_STR = {x:LBL_NAMES[x] for x in np.arange(19)}
INT_2_STR_LOWER = {k:v.lower().replace(" ", "_")for k,v in INT_2_STR.items() }
STR_2_INT_LOWER = {v:k for k,v in INT_2_STR_LOWER.items() }
STR_2_INT = {v:k for k,v in INT_2_STR.items() }
FIG_FONT = dict(family="Helvetica, Arial", size=14, color="
LABEL_COLORS = [px.colors.label_rgb(px.colors.convert_to_RGB_255(x)) for x in sns.color_palette("Spectral", len(LBL_NAMES)) ]
LABEL_COL_MAP = {str(i):x for i,x in enumerate(LABEL_COLORS)}
print("
...IMPORTS COMPLETE...
")
ONLY_PUBLIC = True
if ONLY_PUBLIC:
print("
...ONLY INFERRING ON PUBLIC TEST DATA(USING PRE-PROCESSED DF )...
")
else:
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "...Physical GPUs,", len(logical_gpus), "Logical GPUs...
")
except RuntimeError as e:
print(e )<define_variables>
|
# Read the training CSV manually: skip the header row, keep raw string rows.
with open("/kaggle/input/digit-recognizer/train.csv")as f:
    reader = csv.reader(f, delimiter=',')
    next(reader)
    dataset = [row for row in reader]
|
Digit Recognizer
|
12,983,127 |
NUC_MODEL = '/kaggle/input/hpacellsegmentatormodelweights/dpn_unet_nuclei_v1.pth'
CELL_MODEL = '/kaggle/input/hpacellsegmentatormodelweights/dpn_unet_cell_3ch_v1.pth'
B2_CELL_CLSFR_DIR = "/kaggle/input/hpa-cellwise-classification-training/ebnet_b2_wdensehead/ckpt-0007-0.0901.ckpt"
DATA_DIR = "/kaggle/input/hpa-single-cell-image-classification"
TEST_IMG_DIR = os.path.join(DATA_DIR, "test")
TEST_IMG_PATHS = sorted([os.path.join(TEST_IMG_DIR, f_name)for f_name in os.listdir(TEST_IMG_DIR)])
print(f"...The number of testing images is {len(TEST_IMG_PATHS)}" \
f"
\t--> i.e.{len(TEST_IMG_PATHS)//4} 4-channel images...")
PUB_SS_CSV = "/kaggle/input/hpa-sample-submission-with-extra-metadata/updated_sample_submission.csv"
SWAP_SS_CSV = os.path.join(DATA_DIR, "sample_submission.csv")
ss_df = pd.read_csv(SWAP_SS_CSV)
DO_TTA = True
TTA_REPEATS = 8
IS_DEMO = len(ss_df)==559
if IS_DEMO:
ss_df_1 = ss_df.drop_duplicates("ImageWidth", keep="first")
ss_df_2 = ss_df.drop_duplicates("ImageWidth", keep="last")
ss_df = pd.concat([ss_df_1, ss_df_2])
del ss_df_1; del ss_df_2; gc.collect() ;
print("
SAMPLE SUBMISSION DATAFRAME
")
display(ss_df)
else:
print("
SAMPLE SUBMISSION DATAFRAME
")
display(ss_df)
if ONLY_PUBLIC:
pub_ss_df = pd.read_csv(PUB_SS_CSV)
if IS_DEMO:
pub_ss_df_1 = pub_ss_df.drop_duplicates("ImageWidth", keep="first")
pub_ss_df_2 = pub_ss_df.drop_duplicates("ImageWidth", keep="last")
pub_ss_df = pd.concat([pub_ss_df_1, pub_ss_df_2])
pub_ss_df.mask_rles = pub_ss_df.mask_rles.apply(lambda x: ast.literal_eval(x))
pub_ss_df.mask_bboxes = pub_ss_df.mask_bboxes.apply(lambda x: ast.literal_eval(x))
pub_ss_df.mask_sub_rles = pub_ss_df.mask_sub_rles.apply(lambda x: ast.literal_eval(x))
print("
TEST DATAFRAME W/ MASKS
")
display(pub_ss_df )<categorify>
|
# Column 0 is the digit label; the remaining 784 columns become 28x28x1 images.
labels, images = np.array_split(np.array(dataset, dtype='float32'), [1,], axis=1)
labels, images = labels.flatten() , images.reshape(( len(images), 28, 28)) [..., np.newaxis]
|
Digit Recognizer
|
12,983,127 |
def binary_mask_to_ascii(mask, mask_val=1):
    """Encode the pixels of `mask` equal to `mask_val` as a base64 COCO-RLE string.

    FIXES: `np.bool` was removed in NumPy 1.24 — the builtin bool behaves
    identically here; the original's dtype guard right after astype(bool)
    could never fire and has been dropped as dead code.
    """
    mask = np.squeeze(np.where(mask == mask_val, 1, 0).astype(bool))
    if len(mask.shape) != 2:
        raise ValueError(f"encode_binary_mask expects a 2d mask, received shape == {mask.shape}")
    # pycocotools expects a Fortran-ordered uint8 HxWx1 array
    mask_to_encode = np.asfortranarray(
        mask.reshape(mask.shape[0], mask.shape[1], 1).astype(np.uint8))
    encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]
    # zlib-compress then base64 so the RLE survives CSV round-trips
    binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
    return base64.b64encode(binary_str).decode()
def rle_encoding(img, mask_val=1):
    """Run-length encode pixels of `img` equal to `mask_val`.

    Positions are 1-indexed and column-major (image is transposed before
    flattening); returns 'start length start length ...' as a string.
    """
    pixel_positions = np.where(img.T.flatten() == mask_val)[0]
    runs = []
    previous = -2
    for pos in pixel_positions:
        if pos > previous + 1:
            # start a new run (1-based start, zero length so far)
            runs.extend((pos + 1, 0))
        runs[-1] += 1
        previous = pos
    return ' '.join(str(v) for v in runs)
def rle_to_mask(rle_string, height, width):
    """Decode a 1-indexed column-major RLE string into a (height, width)
    uint8 mask whose covered pixels are 255."""
    flat = np.zeros(height * width, dtype=np.uint8)
    numbers = [int(tok) for tok in rle_string.split(' ')]
    for start, length in np.array(numbers).reshape(-1, 2):
        flat[start - 1:start - 1 + length] = 255
    # runs were laid out down the columns, so reshape then transpose
    return flat.reshape(width, height).T
def decode_img(img, img_size=(224,224), testing=False):
    """Decode PNG bytes to a single-channel uint8 tensor.

    testing=False: resized to `img_size`; testing=True: native resolution.
    """
    if not testing:
        img = tf.image.decode_png(img, channels=1)
        return tf.cast(tf.image.resize(img, img_size), tf.uint8)
    else:
        return tf.image.decode_png(img, channels=1)
def preprocess_path_ds(rp, gp, bp, yp, lbl, n_classes=19, img_size=(224,224), combine=True, drop_yellow=True):
    """Load the four channel PNGs for one sample and one-hot encode the label.

    combine stacks channels into a single HxWxC tensor; drop_yellow omits
    the yellow channel; otherwise individual channel tensors are returned.
    """
    ri = decode_img(tf.io.read_file(rp), img_size)
    gi = decode_img(tf.io.read_file(gp), img_size)
    bi = decode_img(tf.io.read_file(bp), img_size)
    yi = decode_img(tf.io.read_file(yp), img_size)
    if combine and drop_yellow:
        return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0]], axis=-1), tf.one_hot(lbl, n_classes, dtype=tf.uint8)
    elif combine:
        return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0], yi[..., 0]], axis=-1), tf.one_hot(lbl, n_classes, dtype=tf.uint8)
    elif drop_yellow:
        return ri, gi, bi, tf.one_hot(lbl, n_classes, dtype=tf.uint8)
    else:
        return ri, gi, bi, yi, tf.one_hot(lbl, n_classes, dtype=tf.uint8)
def create_pred_col(row):
    """Pick the post-merge PredictionString: prefer the `_y` column, falling
    back to `_x` when `_y` is missing (NaN/None)."""
    if pd.notnull(row.PredictionString_y):
        return row.PredictionString_y
    return row.PredictionString_x
def load_image(img_id, img_dir, testing=False, only_public=False):
    """Load one sample's channel PNGs as a stacked uint8 array.

    testing=False: PIL path, RGBY stacked channel-last. testing=True:
    tf decode path; only_public drops yellow and stacks channel-last,
    otherwise all four channels are stacked channel-first.
    """
    if only_public:
        return_axis = -1
        clr_list = ["red", "green", "blue"]
    else:
        return_axis = 0
        clr_list = ["red", "green", "blue", "yellow"]
    if not testing:
        rgby = [
            np.asarray(Image.open(os.path.join(img_dir, img_id+f"_{c}.png")) , np.uint8)\
            for c in ["red", "green", "blue", "yellow"]
        ]
        return np.stack(rgby, axis=-1)
    else:
        return np.stack(
            [np.asarray(decode_img(tf.io.read_file(os.path.join(img_dir, img_id+f"_{c}.png")) , testing=True), np.uint8)[..., 0] \
             for c in clr_list], axis=return_axis,
        )
def plot_rgb(arr, figsize=(12,12)) :
    """Display `arr` as an RGB composite with matplotlib (no return value)."""
    plt.figure(figsize=figsize)
    plt.title(f"RGB Composite Image", fontweight="bold")
    plt.imshow(arr)
    plt.axis(False)
    plt.show()
def convert_rgby_to_rgb(arr):
    """Collapse an RGBY (channel-last) array to RGB.

    Red and blue pass through; half of the yellow channel is folded into
    green. Output keeps the input dtype (zeros_like).
    """
    rgb = np.zeros_like(arr[..., :-1])
    rgb[..., 0] = arr[..., 0]
    rgb[..., 2] = arr[..., 2]
    # yellow contributes half its intensity to green
    rgb[..., 1] = arr[..., 1] + arr[..., 3] / 2
    return rgb
def plot_ex(arr, figsize=(20,6), title=None, plot_merged=True, rgb_only=False):
    """Plot each channel of an RGB(Y) image in its display colour, optionally
    followed by a merged composite panel. Pure visualization; no return value.
    """
    if plot_merged and not rgb_only:
        n_images=5
    elif plot_merged and rgb_only:
        n_images=4
    elif not plot_merged and rgb_only:
        n_images=4
    else:
        n_images=3
    plt.figure(figsize=figsize)
    if type(title)== str:
        plt.suptitle(title, fontsize=20, fontweight="bold")
    for i, c in enumerate(["Red Channel – Microtubles", "Green Channel – Protein of Interest", "Blue - Nucleus", "Yellow – Endoplasmic Reticulum"]):
        if not rgb_only:
            ch_arr = np.zeros_like(arr[..., :-1])
        else:
            ch_arr = np.zeros_like(arr)
        if c in ["Red Channel – Microtubles", "Green Channel – Protein of Interest", "Blue - Nucleus"]:
            ch_arr[..., i] = arr[..., i]
        else:
            if rgb_only:
                continue
            # yellow is rendered as red+green
            ch_arr[..., 0] = arr[..., i]
            ch_arr[..., 1] = arr[..., i]
        plt.subplot(1,n_images,i+1)
        plt.title(f"{c.title() }", fontweight="bold")
        plt.imshow(ch_arr)
        plt.axis(False)
    if plot_merged:
        plt.subplot(1,n_images,n_images)
        if rgb_only:
            plt.title(f"Merged RGB", fontweight="bold")
            plt.imshow(arr)
        else:
            plt.title(f"Merged RGBY into RGB", fontweight="bold")
            plt.imshow(convert_rgby_to_rgb(arr))
        plt.axis(False)
    plt.tight_layout(rect=[0, 0.2, 1, 0.97])
    plt.show()
def flatten_list_of_lists(l_o_l, to_string=False):
    """Flatten one level of nesting; optionally str()-convert each item."""
    if to_string:
        return [str(item) for sub in l_o_l for item in sub]
    return [item for sub in l_o_l for item in sub]
def create_segmentation_maps(list_of_image_lists, segmentator, batch_size=8):
    """Run HPA cell segmentation in batches and RLE-encode every cell mask.

    list_of_image_lists: parallel path lists [red, yellow, blue].
    Returns {image_id: [rle string per cell]}.
    CLEANUP: dropped the original's unused `new_name` local (an
    os.path.basename result that was never read) and the unused nucleus
    mask binding.
    """
    all_mask_rles = {}
    for i in tqdm(range(0, len(list_of_image_lists[0]), batch_size), total=len(list_of_image_lists[0])//batch_size):
        sub_images = [img_channel_list[i:i+batch_size] for img_channel_list in list_of_image_lists]
        cell_segmentations = segmentator.pred_cells(sub_images)
        # nuclei are predicted from the blue channel only
        nuc_segmentations = segmentator.pred_nuclei(sub_images[2])
        for j, path in enumerate(sub_images[0]):
            img_id = path.replace("_red.png", "").rsplit("/", 1)[1]
            _, cell_mask = label_cell(nuc_segmentations[j], cell_segmentations[j])
            # one RLE per labelled cell id (labels start at 1)
            all_mask_rles[img_id] = [rle_encoding(cell_mask, mask_val=k) for k in range(1, np.max(cell_mask)+1)]
    return all_mask_rles
def get_img_list(img_dir, return_ids=False, sub_n=None):
    """Collect sorted per-channel path lists ([red, yellow, blue]) from
    `img_dir`, optionally truncated to the first `sub_n` and optionally
    preceded by the bare image ids (derived from the red paths)."""
    if sub_n is None:
        sub_n=len(glob(img_dir + '/' + f'*_red.png'))
    if return_ids:
        images = [sorted(glob(img_dir + '/' + f'*_{c}.png')) [:sub_n] for c in ["red", "yellow", "blue"]]
        return [x.replace("_red.png", "" ).rsplit("/", 1)[1] for x in images[0]], images
    else:
        return [sorted(glob(img_dir + '/' + f'*_{c}.png')) [:sub_n] for c in ["red", "yellow", "blue"]]
def get_contour_bbox_from_rle(rle, width, height, return_mask=True,):
    """Decode an RLE into a mask and return its (x1, y1, x2, y2) bounding box.

    NOTE(review): only the first contour (cnts[0]) is measured — a
    multi-fragment mask loses its other fragments; confirm intended.
    """
    mask = rle_to_mask(rle, height, width ).copy()
    cnts = grab_contours(
        cv2.findContours(
            mask,
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE
        ))
    x,y,w,h = cv2.boundingRect(cnts[0])
    if return_mask:
        return(x,y,x+w,y+h), mask
    else:
        return(x,y,x+w,y+h)
def get_contour_bbox_from_raw(raw_mask):
    """Return one (x1, y1, x2, y2) box per external contour in `raw_mask`,
    sorted top-to-bottom then left-to-right."""
    cnts = grab_contours(
        cv2.findContours(
            raw_mask,
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE
        ))
    xywhs = [cv2.boundingRect(cnt)for cnt in cnts]
    xys = [(xywh[0], xywh[1], xywh[0]+xywh[2], xywh[1]+xywh[3])for xywh in xywhs]
    return sorted(xys, key=lambda x:(x[1], x[0]))
def pad_to_square(a):
    """Zero-pad the first two dims of an HxWx... array so they are equal,
    splitting the padding as evenly as possible; square input is returned
    unchanged."""
    rows, cols = a.shape[0], a.shape[1]
    if rows == cols:
        return a
    diff = abs(rows - cols)
    before = diff // 2
    after = diff - before
    if cols > rows:
        # wider than tall: pad top/bottom
        spec = [(before, after), (0, 0), (0, 0)]
    else:
        # taller than wide: pad left/right
        spec = [(0, 0), (before, after), (0, 0)]
    return np.pad(a, spec, mode='constant')
def cut_out_cells(rgby, rles, resize_to=(256,256), square_off=True, return_masks=False, from_raw=True):
    """Crop individual cells out of `rgby` using per-cell RLE masks.

    BUG FIX: the original called the undefined name `get_contour_bbox`; the
    matching helper in this file is `get_contour_bbox_from_rle`.
    NOTE(review): `w,h = rgby.shape[:2]` actually unpacks (rows, cols) while
    the helper takes (width, height) — verify on non-square images.
    `from_raw` is kept for interface compatibility but unused, as before.
    """
    w,h = rgby.shape[:2]
    contour_bboxes = [get_contour_bbox_from_rle(rle, w, h, return_mask=return_masks) for rle in rles]
    if return_masks:
        masks = [x[-1] for x in contour_bboxes]
        contour_bboxes = [x[:-1] for x in contour_bboxes]
    # crop each bbox (x1, y1, x2, y2) out of the full image
    arrs = [rgby[bbox[1]:bbox[3], bbox[0]:bbox[2],...] for bbox in contour_bboxes]
    if square_off:
        arrs = [pad_to_square(arr) for arr in arrs]
    if resize_to is not None:
        arrs = [
            cv2.resize(pad_to_square(arr).astype(np.float32),
                       resize_to,
                       interpolation=cv2.INTER_CUBIC)
            for arr in arrs
        ]
    if return_masks:
        return arrs, masks
    return arrs
def grab_contours(cnts):
    """Extract the contour list from cv2.findContours output, which is a
    2-tuple or 3-tuple depending on the OpenCV version."""
    if len(cnts) == 2:
        return cnts[0]
    if len(cnts) == 3:
        return cnts[1]
    raise Exception(("Contours tuple must have length 2 or 3, "
                     "otherwise OpenCV changed their cv2.findContours return "
                     "signature yet again.Refer to OpenCV's documentation "
                     "in that case"))
def preprocess_row(img_id, img_w, img_h, combine=True, drop_yellow=True):
    """Load a test image's channel PNGs at native resolution.

    combine stacks channels into one tensor; drop_yellow omits the yellow
    channel; otherwise individual channel tensors are returned.
    """
    rp = os.path.join(TEST_IMG_DIR, img_id+"_red.png")
    gp = os.path.join(TEST_IMG_DIR, img_id+"_green.png")
    bp = os.path.join(TEST_IMG_DIR, img_id+"_blue.png")
    yp = os.path.join(TEST_IMG_DIR, img_id+"_yellow.png")
    ri = decode_img(tf.io.read_file(rp),(img_w, img_h), testing=True)
    gi = decode_img(tf.io.read_file(gp),(img_w, img_h), testing=True)
    bi = decode_img(tf.io.read_file(bp),(img_w, img_h), testing=True)
    if not drop_yellow:
        yi = decode_img(tf.io.read_file(yp),(img_w, img_h), testing=True)
    if combine and drop_yellow:
        return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0]], axis=-1)
    elif combine:
        return tf.stack([ri[..., 0], gi[..., 0], bi[..., 0], yi[..., 0]], axis=-1)
    elif drop_yellow:
        return ri, gi, bi
    else:
        return ri, gi, bi, yi
def plot_predictions(img, masks, preds, confs=None, fill_alpha=0.3, lbl_as_str=True):
    """Draw per-cell contours, class labels and translucent fills on a copy
    of `img` and show it. Visualization only; no return value.

    NOTE(review): `confs==None` should be `is None` — if an array is ever
    passed, `==` compares element-wise and this branch breaks; confirm.
    """
    FONT = cv2.FONT_HERSHEY_SIMPLEX; FONT_SCALE = 0.7; FONT_THICKNESS = 2; FONT_LINE_TYPE = cv2.LINE_AA;
    COLORS = [[round(y*255)for y in x] for x in sns.color_palette("Spectral", len(LBL_NAMES)) ]
    to_plot = img.copy()
    cntr_img = img.copy()
    if confs==None:
        confs = [None,]*len(masks)
    cnts = grab_contours(
        cv2.findContours(
            masks,
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE
        ))
    # same top-to-bottom / left-to-right ordering as the predictions
    cnts = sorted(cnts, key=lambda x:(cv2.boundingRect(x)[1], cv2.boundingRect(x)[0]))
    for c, pred, conf in zip(cnts, preds, confs):
        color = COLORS[pred[0]]
        if not lbl_as_str:
            classes = "CLS=["+",".join([str(p)for p in pred])+"]"
        else:
            classes = ", ".join([INT_2_STR[p] for p in pred])
        # contour centroid for text placement
        M = cv2.moments(c)
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        text_width, text_height = cv2.getTextSize(classes, FONT, FONT_SCALE, FONT_THICKNESS)[0]
        cv2.drawContours(to_plot, [c], contourIdx=-1, color=[max(0, x-40)for x in color], thickness=10)
        cv2.drawContours(cntr_img, [c], contourIdx=-1, color=(color), thickness=-1)
        cv2.putText(to_plot, classes,(cx-text_width//2,cy-text_height//2),
                    FONT, FONT_SCALE, [min(255, x+40)for x in color], FONT_THICKNESS, FONT_LINE_TYPE)
    cv2.addWeighted(cntr_img, fill_alpha, to_plot, 1-fill_alpha, 0, to_plot)
    plt.figure(figsize=(16,16))
    plt.imshow(to_plot)
    plt.axis(False)
    plt.show()
def tta(original_img_batch, repeats=4):
    """Test-time augmentation: return the original batch plus `repeats`
    randomly flipped/rotated/colour-jittered copies (list of batches)."""
    tta_img_batches = [original_img_batch,]
    for i in range(repeats):
        img_batch = original_img_batch
        # fresh random seeds per repeat for the stateless ops below
        SEED = tf.random.uniform(( 2,), minval=0, maxval=100, dtype=tf.dtypes.int32)
        K = tf.random.uniform(( 1,), minval=0, maxval=4, dtype=tf.dtypes.int32)[0]
        img_batch = tf.image.stateless_random_flip_left_right(img_batch, SEED)
        img_batch = tf.image.stateless_random_flip_up_down(img_batch, SEED)
        img_batch = tf.image.rot90(img_batch, K)
        img_batch = tf.image.stateless_random_saturation(img_batch, 0.9, 1.1, SEED)
        img_batch = tf.image.stateless_random_brightness(img_batch, 0.075, SEED)
        img_batch = tf.image.stateless_random_contrast(img_batch, 0.9, 1.1, SEED)
        tta_img_batches.append(img_batch)
    return tta_img_batches
|
# 90/10 split of the in-memory digit arrays into tf.data pipelines.
# NOTE(review): shuffle() reshuffles each iteration by default, so
# take()/skip() may not give disjoint train/val sets across epochs —
# confirm (cache() mitigates only after the first pass).
split_ratio = 0.1
data_set = tf.data.Dataset.from_tensor_slices(( images, labels)).shuffle(len(labels))
train_set = data_set.take(int(len(labels)*(1-split_ratio)) ).batch(32 ).cache().prefetch(1)
val_set = data_set.skip(len(labels)-int(len(labels)*(1-split_ratio)) ).batch(32 ).cache().prefetch(1 )
|
Digit Recognizer
|
12,983,127 |
inference_model = tf.keras.models.load_model(B2_CELL_CLSFR_DIR)
IMAGE_SIZES = [1728, 2048, 3072, 4096]
BATCH_SIZE = 8
CONF_THRESH = 0.0
TILE_SIZE =(224,224)
if ONLY_PUBLIC:
predict_df_1728 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[0]]
predict_df_2048 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[1]]
predict_df_3072 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[2]]
predict_df_4096 = pub_ss_df[pub_ss_df.ImageWidth==IMAGE_SIZES[3]]
else:
segmentator = cellsegmentator.CellSegmentator(NUC_MODEL, CELL_MODEL, scale_factor=0.25, padding=True)
predict_df_1728 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[0]]
predict_df_2048 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[1]]
predict_df_3072 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[2]]
predict_df_4096 = ss_df[ss_df.ImageWidth==IMAGE_SIZES[3]]
predict_ids_1728 = predict_df_1728.ID.to_list()
predict_ids_2048 = predict_df_2048.ID.to_list()
predict_ids_3072 = predict_df_3072.ID.to_list()
predict_ids_4096 = predict_df_4096.ID.to_list()<create_dataframe>
|
# Augmentation baked into the model graph: rescale to [0, 1] plus small
# random rotation, zoom and translation layers.
data_augmentation = keras.models.Sequential([
    keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=(28, 28, 1)) ,
    keras.layers.experimental.preprocessing.RandomRotation(0.1),
    keras.layers.experimental.preprocessing.RandomZoom(0.1),
    keras.layers.experimental.preprocessing.RandomTranslation(0.1, 0.1)
] )
|
Digit Recognizer
|
12,983,127 |
predictions = []
sub_df = pd.DataFrame(columns=["ID"], data=predict_ids_1728+predict_ids_2048+predict_ids_3072+predict_ids_4096)
for size_idx, submission_ids in enumerate([predict_ids_1728, predict_ids_2048, predict_ids_3072, predict_ids_4096]):
size = IMAGE_SIZES[size_idx]
if submission_ids==[]:
print(f"
...SKIPPING SIZE {size} AS THERE ARE NO IMAGE IDS...
")
continue
else:
print(f"
...WORKING ON IMAGE IDS FOR SIZE {size}...
")
for i in tqdm(range(0, len(submission_ids), BATCH_SIZE), total=int(np.ceil(len(submission_ids)/BATCH_SIZE))):
batch_rgby_images = [
load_image(ID, TEST_IMG_DIR, testing=True, only_public=ONLY_PUBLIC)\
for ID in submission_ids[i:(i+BATCH_SIZE)]
]
if ONLY_PUBLIC:
batch_cell_bboxes = pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_bboxes.values
batch_rgb_images = batch_rgby_images
submission_rles = pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_sub_rles.values
if IS_DEMO:
batch_masks = [
sum([rle_to_mask(mask, size, size)for mask in batch])\
for batch in pub_ss_df[pub_ss_df.ID.isin(submission_ids[i:(i+BATCH_SIZE)])].mask_rles.values
]
else:
cell_segmentations = segmentator.pred_cells([[rgby_image[j] for rgby_image in batch_rgby_images] for j in [0, 3, 2]])
nuc_segmentations = segmentator.pred_nuclei([rgby_image[2] for rgby_image in batch_rgby_images])
batch_masks = [label_cell(nuc_seg, cell_seg)[1].astype(np.uint8)for nuc_seg, cell_seg in zip(nuc_segmentations, cell_segmentations)]
batch_rgb_images = [rgby_image.transpose(1,2,0)[..., :-1] for rgby_image in batch_rgby_images]
batch_cell_bboxes = [get_contour_bbox_from_raw(mask)for mask in batch_masks]
submission_rles = [[binary_mask_to_ascii(mask, mask_val=cell_id)for cell_id in range(1, mask.max() +1)] for mask in batch_masks]
batch_cell_tiles = [[
cv2.resize(
pad_to_square(
rgb_image[bbox[1]:bbox[3], bbox[0]:bbox[2],...]),
TILE_SIZE, interpolation=cv2.INTER_CUBIC)for bbox in bboxes]
for bboxes, rgb_image in zip(batch_cell_bboxes, batch_rgb_images)
]
if DO_TTA:
tta_batch_cell_tiles = [tta(tf.cast(ct, dtype=tf.float32), repeats=TTA_REPEATS)for ct in batch_cell_tiles]
else:
batch_cell_tiles = [tf.cast(ct, dtype=tf.float32)for ct in batch_cell_tiles]
if DO_TTA:
tta_batch_o_preds = [[inference_model.predict(ct)for ct in bct] for bct in tta_batch_cell_tiles]
batch_o_preds = [tf.keras.layers.Average()(tta_o_preds ).numpy() for tta_o_preds in tta_batch_o_preds]
else:
batch_o_preds = [inference_model.predict(cell_tiles)for cell_tiles in batch_cell_tiles]
batch_confs = [[pred[np.where(pred>CONF_THRESH)] for pred in o_preds] for o_preds in batch_o_preds]
batch_preds = [[np.where(pred>CONF_THRESH)[0] for pred in o_preds] for o_preds in batch_o_preds]
for j, preds in enumerate(batch_preds):
for k in range(len(preds)) :
if preds[k].size==0:
batch_preds[j][k]=np.array([18,])
batch_confs[j][k]=np.array([1-np.max(batch_o_preds[j][k]),])
if IS_DEMO:
print("
...DEMO IMAGES...
")
for rgb_images, masks, preds, confs in zip(batch_rgb_images, batch_masks, batch_preds, batch_confs):
plot_predictions(rgb_images, masks, preds, confs=confs, fill_alpha=0.2, lbl_as_str=True)
submission_rles = [flatten_list_of_lists([[m,]*len(p)for m, p in zip(masks, preds)])for masks, preds in zip(submission_rles, batch_preds)]
batch_preds = [flatten_list_of_lists(preds, to_string=True)for preds in batch_preds]
batch_confs = [[f"{conf:.4f}" for cell_confs in confs for conf in cell_confs] for confs in batch_confs]
predictions.extend([" ".join(flatten_list_of_lists(zip(*[preds,confs,masks])))for preds, confs, masks in zip(batch_preds, batch_confs, submission_rles)])
sub_df["PredictionString"] = predictions
print("
...TEST DATAFRAME...
")
display(sub_df.head(3))<save_to_csv>
|
# CNN trained with an exponentially increasing learning rate
# (1e-6 -> ~1e-1 over 20 epochs): an LR-range-test style run to find a
# good learning rate before the real training below.
DefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, activation='relu', padding='same')
model = keras.models.Sequential([
    data_augmentation,
    DefaultConv2D(filters=32),
    DefaultConv2D(filters=32),
    keras.layers.MaxPool2D() ,
    DefaultConv2D(filters=64),
    DefaultConv2D(filters=64),
    keras.layers.MaxPool2D() ,
    DefaultConv2D(filters=128),
    DefaultConv2D(filters=128),
    keras.layers.MaxPool2D() ,
    keras.layers.Dropout(0.2),
    keras.layers.Flatten() ,
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=keras.optimizers.Adam() ,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
n_epochs=20
lr_schedule = keras.callbacks.LearningRateScheduler(lambda epoch: 1e-6 * 10 **(epoch / n_epochs * 5))
history = model.fit(train_set, epochs=n_epochs, callbacks=[lr_schedule] )
|
Digit Recognizer
|
12,983,127 |
# Merge computed predictions into the sample submission, preferring the
# freshly computed PredictionString where present, then write submission.csv.
ss_df = ss_df.merge(sub_df, how="left", on="ID")
ss_df["PredictionString"] = ss_df.apply(create_pred_col, axis=1)
ss_df = ss_df.drop(columns=["PredictionString_x", "PredictionString_y"])
ss_df.to_csv("/kaggle/working/submission.csv", index=False)
display(ss_df )
|
# Same architecture as the LR-range run above, retrained for real with a
# fixed 1e-3 LR, early stopping (best weights restored) and plateau LR decay.
DefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, activation='relu', padding='same')
model = keras.models.Sequential([
    data_augmentation,
    DefaultConv2D(filters=32),
    DefaultConv2D(filters=32),
    keras.layers.MaxPool2D() ,
    DefaultConv2D(filters=64),
    DefaultConv2D(filters=64),
    keras.layers.MaxPool2D() ,
    DefaultConv2D(filters=128),
    DefaultConv2D(filters=128),
    keras.layers.MaxPool2D() ,
    keras.layers.Dropout(0.2),
    keras.layers.Flatten() ,
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=keras.optimizers.Adam(1e-3),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
n_epochs=200
history = model.fit(train_set,validation_data=val_set,
                    epochs=n_epochs,
                    callbacks=[keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True),
                               keras.callbacks.ReduceLROnPlateau(patience=5, factor=0.5, min_lr=1e-4)] )
|
Digit Recognizer
|
12,983,127 |
!pip install.. /input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl
!pip install.. /input/hpapytorchzoozip/pytorch_zoo-master
!pip install.. /input/hpacellsegmentatormaster/HPA-Cell-Segmentation-master<set_options>
|
# Read the test CSV manually (skip the header) and reshape to 28x28x1 images.
with open("/kaggle/input/digit-recognizer/test.csv")as f:
    reader = csv.reader(f, delimiter=',')
    next(reader)
    dataset = [row for row in reader]
test_imgs = np.array(dataset, dtype='float32' ).reshape(( len(dataset), 28, 28)) [..., np.newaxis]
|
Digit Recognizer
|
12,983,127 |
random.seed(0)
LOCAL = False
COMPUTE_PUBLIC = False
COMPUTE_PRIVATE = True
EXP_NAME_1ST = ["exp049", "exp050"]
MODEL_NAMES_1ST = [
"model_best_0.pth",
"model_best_1.pth",
"model_best_2.pth",
"model_best_3.pth",
]
TEST_LOCAL_COMPUTED_1ST = [
"pred_0.csv",
"pred_1.csv",
"pred_2.csv",
"pred_3.csv"
]
BACKBONE_NAME = "seresnet152d"
EXP_NAME = ["exp068", "exp071", "exp072", "exp073"]
MODEL_NAMES = [
[
"model_best_0.pth", "model_tmp_0.pth",
"model_best_1.pth", "model_tmp_1.pth",
"model_best_2.pth", "model_tmp_2.pth",
"model_best_3.pth", "model_tmp_3.pth"
],
[
"model_0_25.pth", "model_0_21.pth",
"model_1_25.pth", "model_1_21.pth",
],
[
"model_0_25.pth", "model_0_21.pth",
],
[
"model_0_25.pth", "model_0_21.pth",
],
]
TEST_LOCAL_COMPUTED = [
[
"pred_0.csv",
"pred_1.csv",
"pred_2.csv",
"pred_3.csv"
],
[
"pred_0.csv",
"pred_1.csv",
],
[
"pred_0.csv",
],
[
"pred_0.csv",
],
]
MODEL_NAMES_IMAGE = [
"model_best_0.pth", "model_tmp_0.pth",
"model_best_1.pth", "model_tmp_1.pth",
"model_best_2.pth", "model_tmp_2.pth",
"model_best_3.pth", "model_tmp_3.pth",
"model_best_4.pth", "model_tmp_4.pth",
"model_best_5.pth", "model_tmp_5.pth",
"model_best_6.pth", "model_tmp_6.pth",
"model_best_7.pth", "model_tmp_7.pth",
]
# --- Inference configuration for the HPA single-cell classification pipeline. ---
# NOTE(review): MODEL_NAMES_IMAGE, LOCAL, EXP_NAME, EXP_NAME_1ST, MODEL_NAMES,
# MODEL_NAMES_1ST, TEST_LOCAL_COMPUTED* and COMPUTE_* flags are defined earlier
# in the notebook (outside this view) — confirm before editing.
MODEL_PATHS_IMAGE = [f".. /input/hpa-image-level-weight/exp102/{p}" for p in MODEL_NAMES_IMAGE]
# Pre-computed image-level prediction CSVs, one per fold/model.
TEST_LOCAL_COMPUTED_IMAGE = [
    "pred_0.csv",
    "pred_1.csv",
    "pred_2.csv",
    "pred_3.csv",
    "pred_4.csv",
    "pred_5.csv",
    "pred_6.csv",
    "pred_7.csv",
]
TEST_LOCAL_COMPUTED_PATHS_IMAGE = [f".. /input/hpa-image-level-weight/exp102/{p}" for p in TEST_LOCAL_COMPUTED_IMAGE]
# 19 target label columns (label_0 .. label_18) used throughout the pipeline.
COLS_TARGET = [f"label_{i}" for i in range(19)]
BATCH_SIZE = 32
IMAGE_SIZE = 512
MARGIN = 100
W_MASK = True
IN_CHANS = 4
# Area thresholds (fractions of image area, presumably) for keeping/dropping
# segmented cells and nuclei — TODO confirm units against the segmentation code.
KEEP_CELL_AREA_MIN = 0.005
KEEP_NUC_AREA_MIN = 0.001
KEEP_EDGE_CELL_AREA_MIN = 0.01
NUC_AREA_MIN_0to5 = 0.12
# Blend weights: [cell-level preds, image-mean preds, image-level preds].
WEIGHT_CELL_LEVEL_VS_IMAGE_MEAN_VS_IMAGE_PRED = [0.6, 0., 0.4]
# Ensemble weight split between 1st-stage and 2nd-stage models.
RATE_OF_WEIGHT_1ST_2ND = [0.2, 0.8]
GPUS = torch.cuda.device_count()
GPU = 0
ROOT = Path.cwd().parent
if LOCAL:
    # Local development layout: weights and data live under the repo root.
    INPUT = ROOT / "input"
    MODEL_PATHS = [ROOT / "output" / EXP_NAME / p for p in MODEL_NAMES]
    TEST_LOCAL_COMPUTED_PATH = ROOT / "output" / EXP_NAME / TEST_LOCAL_COMPUTED
    TEST_IMG_DIR = ROOT / "data" / "test_rgby_images"
    MASK_DIR = ROOT / "data" / "mask"
    NUC_MODEL = MASK_DIR / "dpn_unet_nuclei_v1.pth"
    CELL_MODEL = MASK_DIR / "dpn_unet_cell_3ch_v1.pth"
    MAX_THRE = 40
else:
    # Kaggle kernel layout: weights come from attached datasets.
    INPUT = ROOT / "input" / "hpa-single-cell-image-classification"
    LIB_DIR = ROOT / "input" / "hpa2021-libs"
    MODEL_PATHS = []
    TEST_LOCAL_COMPUTED_PATHS_1ST = []
    # 1st-stage model checkpoints and their pre-computed predictions.
    for e_name in EXP_NAME_1ST:
        MODEL_PATHS += [LIB_DIR / e_name / p for p in MODEL_NAMES_1ST]
        TEST_LOCAL_COMPUTED_PATHS_1ST += [LIB_DIR / e_name / p for p in TEST_LOCAL_COMPUTED_1ST]
    LEN_1ST = len(MODEL_PATHS)
    TEST_LOCAL_COMPUTED_PATHS = []
    # 2nd-stage checkpoints appended after the 1st-stage ones.
    for i, e_name in enumerate(EXP_NAME):
        MODEL_PATHS += [LIB_DIR / e_name / p for p in MODEL_NAMES[i]]
        TEST_LOCAL_COMPUTED_PATHS += [LIB_DIR / e_name / p for p in TEST_LOCAL_COMPUTED[i]]
    # Per-model ensemble weights, shaped (n_models, 1, 1) for broadcasting.
    # Each stage's models share stage_weight / n_models_in_stage, scaled so
    # the overall mean weight is 1.
    WEIGHT_1ST_2ND = np.ones(len(MODEL_PATHS)).reshape(-1 , 1, 1)
    WEIGHT_1ST_2ND[:LEN_1ST] = len(MODEL_PATHS)*(1 / LEN_1ST)* RATE_OF_WEIGHT_1ST_2ND[0]
    WEIGHT_1ST_2ND[LEN_1ST:] = len(MODEL_PATHS)*(1 /(len(MODEL_PATHS)- LEN_1ST)) * RATE_OF_WEIGHT_1ST_2ND[1]
    OUTPUT = ROOT / "temp"
    # Working directories for the nuclei/cell segmentation masks.
    MASK_DIR = OUTPUT / "mask"
    MASK_DIR.mkdir(exist_ok=True, parents=True)
    NUCEIL_DIR = MASK_DIR / "test" / "nuclei"
    NUCEIL_DIR.mkdir(exist_ok=True, parents=True)
    CELL_DIR = MASK_DIR / "test" / "cell"
    CELL_DIR.mkdir(exist_ok=True, parents=True)
    # HPA cell-segmentator weights (separate attached dataset).
    NUC_MODEL =(
        ROOT / "input" / "hpacellsegmentatormodelweights" / "dpn_unet_nuclei_v1.pth"
    )
    CELL_MODEL =(
        ROOT / "input" / "hpacellsegmentatormodelweights" / "dpn_unet_cell_3ch_v1.pth"
    )
    # Kaggle kernels have few CPUs, so keep DataLoader workers low.
    MAX_THRE = 2
sys.path.append(str(ROOT / "input" / "hpa2021-libs"))
sample_submission = pd.read_csv(INPUT / "sample_submission.csv")
# Sanity-check that every required weight/prediction file is present.
print("NUC_MODEL:", NUC_MODEL.exists())
print("CELL_MODEL:", CELL_MODEL.exists())
print("MODEL_PATHS:", [p.exists() for p in MODEL_PATHS])
print("TEST_LOCAL_COMPUTED_PATHS:", [p.exists() for p in TEST_LOCAL_COMPUTED_PATHS])
<categorify>
|
# Build the submission frame: one row per test image, 1-based image IDs,
# predicted label = argmax over the model's class scores.
raw_scores = model.predict(test_imgs)
predictions = [np.argmax(row) for row in raw_scores]
ids = np.arange(1, len(dataset) + 1)
submission_df = pd.DataFrame({'ImageId': ids, 'label': predictions})
submission_df.head()
|
Digit Recognizer
|
12,983,127 |
<load_from_csv><EOS>
|
submission_df.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
14,556,937 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<categorify>
|
# Notebook setup: inline plots, fixed RNG seed for reproducibility,
# seaborn theme, and a listing of all attached input files.
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep')
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
|
Digit Recognizer
|
14,556,937 |
# Apply the first post-processing step (defined elsewhere in the notebook)
# to the cell-level predictions, then echo the frame for inspection.
test = post_process1(test)
test
|
# Load the Digit Recognizer train/test splits from the Kaggle input mount.
DATA_DIR = '/kaggle/input/digit-recognizer'
train_data = pd.read_csv(DATA_DIR + '/train.csv')
test_data = pd.read_csv(DATA_DIR + '/test.csv')
|
Digit Recognizer
|
14,556,937 |
# Image-level inference: collapse the per-cell frame to one row per image,
# then score each image with the ensemble of image-level models.
test_image = test.groupby("image_id" ).first().reset_index()
if LOCAL:
    # Locally we reuse pre-computed predictions instead of running inference.
    pass
else:
    test_loader = DataLoader(
        ImageDataset(test_image),
        batch_size=BATCH_SIZE,
        num_workers=MAX_THRE,
        pin_memory=True,
    )
    # Load every image-level checkpoint into eval mode.
    models = []
    for p in MODEL_PATHS_IMAGE:
        model = get_model(p)
        model.eval()
        models.append(model)
    preds = predict(models, test_loader, GPU)
    # Quick sanity check: distribution of argmax classes per image.
    display(pd.crosstab(test_image["image_id"], preds.argmax(1)))
    test_image[COLS_TARGET] = preds
    # Free GPU memory before the heavier cell-level pass.
    del models, model, test_loader
    torch.cuda.empty_cache()
    gc.collect()
display(test_image )
|
# Separate the pixel features from the digit labels.
y = train_data['label']
X = train_data.drop(columns=['label'])
|
Digit Recognizer
|
14,556,937 |
# Cell-level inference: score every segmented cell with the weighted
# ensemble (1st- and 2nd-stage models), then post-process.
if LOCAL:
    # Locally we reuse pre-computed predictions instead of running inference.
    pass
else:
    test_loader = DataLoader(
        MyDataset(test, mode="test", w_mask=W_MASK),
        batch_size=BATCH_SIZE,
        num_workers=MAX_THRE,
        pin_memory=True,
    )
    models = []
    for p in MODEL_PATHS:
        model = get_model(p)
        model.eval()
        models.append(model)
    # WEIGHT_1ST_2ND gives each model its stage-dependent ensemble weight.
    preds = predict_weighted(models, test_loader, GPU, WEIGHT_1ST_2ND)
    display(pd.crosstab(test["image_id"], preds.argmax(1)))
    test[COLS_TARGET] = preds
    # Blend with image-level predictions and apply heuristics
    # (post_process* are defined elsewhere in the notebook).
    test = post_process2(test)
    test = post_process3(test, test_image)
    del models, model
    torch.cuda.empty_cache()
    gc.collect()
display(test)
<load_from_csv>
|
# Visualize and print the class balance of the digit labels.
sns.countplot(x=y)
print(y.value_counts().sort_index(ascending=True))
|
Digit Recognizer
|
14,556,937 |
# Final submission assembly. Four modes depending on which part of the test
# set we actually ran inference on:
#   dryrun            -> rebuild everything from pre-computed CSVs
#   only private      -> pre-computed public + freshly computed private
#   full compute      -> everything freshly computed
#   only public       -> fresh public, pad the private rows
cols = sample_submission.columns
if COMPUTE_PUBLIC is False and COMPUTE_PRIVATE is False:
    print("dryrun, replace with local computed file")
    # Average the 2nd-stage per-model prediction CSVs.
    test_2nd = pd.read_csv(TEST_LOCAL_COMPUTED_PATHS[0])
    for p in TEST_LOCAL_COMPUTED_PATHS[1:]:
        test_2nd.loc[:, COLS_TARGET] += pd.read_csv(p ).loc[:, COLS_TARGET]
    test_2nd.loc[:, COLS_TARGET] /= len(TEST_LOCAL_COMPUTED_PATHS)
    # Average the 1st-stage per-model prediction CSVs.
    test_1st = pd.read_csv(TEST_LOCAL_COMPUTED_PATHS_1ST[0])
    for p in TEST_LOCAL_COMPUTED_PATHS_1ST[1:]:
        test_1st.loc[:, COLS_TARGET] += pd.read_csv(p ).loc[:, COLS_TARGET]
    test_1st.loc[:, COLS_TARGET] /= len(TEST_LOCAL_COMPUTED_PATHS_1ST)
    test = test_2nd.copy()
    # Zero out column index 11 of the 1st-stage preds before blending
    # — presumably an unreliable class for that stage; TODO confirm.
    test_1st.iloc[:, 11] = 0
    test.loc[:, COLS_TARGET] = test_1st.loc[:, COLS_TARGET] * RATE_OF_WEIGHT_1ST_2ND[0] + test_2nd.loc[:, COLS_TARGET] * RATE_OF_WEIGHT_1ST_2ND[1]
    # Average the image-level prediction CSVs.
    test_image = pd.read_csv(TEST_LOCAL_COMPUTED_PATHS_IMAGE[0])
    for p in TEST_LOCAL_COMPUTED_PATHS_IMAGE[1:]:
        test_image.loc[:, COLS_TARGET] += pd.read_csv(p ).loc[:, COLS_TARGET]
    test_image.loc[:, COLS_TARGET] /= len(TEST_LOCAL_COMPUTED_PATHS_IMAGE)
    test = post_process1(test)
    test = post_process2(test)
    test = post_process3(test, test_image)
    write_submission(test, fill_shortage=True)
elif COMPUTE_PUBLIC is False and COMPUTE_PRIVATE is True:
    print("only private")
    # Same CSV-averaging/blending as the dryrun branch, into *_local frames.
    test_2nd = pd.read_csv(TEST_LOCAL_COMPUTED_PATHS[0])
    for p in TEST_LOCAL_COMPUTED_PATHS[1:]:
        test_2nd.loc[:, COLS_TARGET] += pd.read_csv(p ).loc[:, COLS_TARGET]
    test_2nd.loc[:, COLS_TARGET] /= len(TEST_LOCAL_COMPUTED_PATHS)
    test_1st = pd.read_csv(TEST_LOCAL_COMPUTED_PATHS_1ST[0])
    for p in TEST_LOCAL_COMPUTED_PATHS_1ST[1:]:
        test_1st.loc[:, COLS_TARGET] += pd.read_csv(p ).loc[:, COLS_TARGET]
    test_1st.loc[:, COLS_TARGET] /= len(TEST_LOCAL_COMPUTED_PATHS_1ST)
    test_local = test_2nd.copy()
    test_1st.iloc[:, 11] = 0
    test_local.loc[:, COLS_TARGET] = test_1st.loc[:, COLS_TARGET] * RATE_OF_WEIGHT_1ST_2ND[0] + test_2nd.loc[:, COLS_TARGET] * RATE_OF_WEIGHT_1ST_2ND[1]
    test_image_local = pd.read_csv(TEST_LOCAL_COMPUTED_PATHS_IMAGE[0])
    for p in TEST_LOCAL_COMPUTED_PATHS_IMAGE[1:]:
        test_image_local.loc[:, COLS_TARGET] += pd.read_csv(p ).loc[:, COLS_TARGET]
    test_image_local.loc[:, COLS_TARGET] /= len(TEST_LOCAL_COMPUTED_PATHS_IMAGE)
    test_local = post_process1(test_local)
    test_local = post_process2(test_local)
    test_local = post_process3(test_local, test_image_local)
    # 559 rows == public-only sample_submission; otherwise append the
    # freshly computed private predictions to the pre-computed public ones.
    if len(sample_submission)== 559:
        write_submission(test_local, fill_shortage=False)
    else:
        test2 = pd.concat([test_local, test[test_local.columns]], ignore_index=True)
        write_submission(test2, fill_shortage=False)
elif COMPUTE_PUBLIC is True and COMPUTE_PRIVATE is True:
    print("full compute")
    write_submission(test, fill_shortage=False)
elif COMPUTE_PUBLIC is True and COMPUTE_PRIVATE is False:
    print("only public")
    write_submission(test, fill_shortage=True)
<load_from_csv>
|
X.isna().sum().value_counts()
|
Digit Recognizer
|
14,556,937 |
# On the public-only test set (559 rows), reload and display the
# written submission as a sanity check.
if len(sample_submission)== 559:
    sub = pd.read_csv("submission.csv")
    display(sub )
|
y.isna().sum()
|
Digit Recognizer
|
14,556,937 |
# Visual sanity check (public-only run): decode and overlay up to 10 predicted
# cell masks for the first 3 submission rows.
if len(sample_submission)== 559:
    sub = pd.read_csv("submission.csv")
    for index, row in sub.head(3 ).iterrows() :
        image_id = row["ID"]
        w = row["ImageWidth"]
        h = row["ImageHeight"]
        # PredictionString is a flat list of (class_id, confidence, rle_mask)
        # triples; regroup it into 3-tuples.
        pred_strs = row["PredictionString"].split()
        pred_strs = list(split_list(pred_strs, 3))
        for i, pred in enumerate(pred_strs):
            class_id, cnf, encoded_mask = pred
            class_id = int(class_id)
            cnf = float(cnf)
            print(f"class_id:{class_id}, image_id:{image_id}, confidence:{cnf}")
            mask = decode_binary_mask(encoded_mask, w, h)
            print_masked_img(image_id, mask)
            # Only show the first 10 cells per image.
            if i == 9:
                break
<install_modules>
|
# Scale pixel intensities to [0, 1] and reshape the flat 784-pixel rows into
# 28x28 single-channel images for the CNN.
X = X / 255.0
X = X.values.reshape(-1, 28, 28, 1)
# One-hot encode the digit labels 0-9 (to_categorical comes from Keras).
y = to_categorical(y, num_classes = 10 )
|
Digit Recognizer
|
14,556,937 |
!pip install -q.. /input/pycocotools/pycocotools-2.0-cp37-cp37m-linux_x86_64.whl<define_variables>
|
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42 )
|
Digit Recognizer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.