kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57)
---|---|---|---|
14,596,146 |
test_df['encoded_brand_name'] = le_brand.transform(test_df['brand_name'])
test_df['encoded_general_cat'] = le_general_cat.transform(test_df['general_cat'])
test_df['encoded_subcat_1'] = le_subcat_1.transform(test_df['subcat_1'])
test_df['encoded_subcat_2'] = le_subcat_2.transform(test_df['subcat_2'])
test_df['seq_item_description'] = tok_desc.texts_to_sequences(test_df["item_description"].values)
test_df['seq_name'] = tok_name.texts_to_sequences(test_df["name"].values)
test_X = get_rnn_data(test_df)
preds_rnn_test = model.predict(test_X )<concatenate>
|
num_epochs = 50
num_models = 10
batch_size = 100
list_histories = []
list_models = []
for i in range(num_models):
model = createModel()
history = model.fit(datagen.flow(train_images, train_labels, batch_size=batch_size),
epochs=num_epochs,
validation_data=(validation_images, validation_labels),
callbacks = [lr_reduction, checkpoint])
list_histories.append(history)
list_models.append(model )
|
Digit Recognizer
|
14,596,146 |
preds_rnn_test = np.expm1(preds_rnn_test.flatten() )<prepare_output>
|
pred = np.zeros(( len(test_images),10))
for i in range(num_models):
pred += list_models[i].predict(test_images )
|
Digit Recognizer
|
14,596,146 |
submission = test_df[["test_id"]]
submission["price"] = preds_rnn_test<save_to_csv>
|
pred = np.argmax(pred, axis=-1)
sub = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
sub['Label'] = pred
sub.head()
|
Digit Recognizer
|
14,596,146 |
submission.to_csv("submission.csv", index=False )<save_to_csv>
|
sub.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
14,596,146 |
submission.to_csv("submission.csv", index=False )<set_options>
|
sub.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
13,720,425 |
warnings.filterwarnings("ignore" )<load_from_csv>
|
fastai.__version__
|
Digit Recognizer
|
13,720,425 |
train_data = pd.read_csv(path+'train.csv')
samp_subm = pd.read_csv(path+'sample_submission.csv' )<define_variables>
|
assert torch.cuda.is_available() , "GPU not available"
|
Digit Recognizer
|
13,720,425 |
print('Number train samples:', len(train_data.index))
print('Number test samples:', len(samp_subm.index))<load_from_csv>
|
torch.__version__
|
Digit Recognizer
|
13,720,425 |
idnum = 2
image_id = train_data.loc[idnum, 'image_id']
data_file = dicom.dcmread(path+'train/'+image_id+'.dicom')
img = data_file.pixel_array<load_from_csv>
|
from fastai import *
from fastai.vision.all import *
|
Digit Recognizer
|
13,720,425 |
pred_2class = pd.read_csv(".. /input/vinbigdata-2class-prediction/2-cls test pred.csv")
low_threshold = 0.001
high_threshold = 0.87
pred_2class<load_from_csv>
|
path = Path('/kaggle/input/digit-recognizer' )
|
Digit Recognizer
|
13,720,425 |
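# 2-class post-processing: p0 is the classifier's probability of "No finding".
# Below low_threshold the detector output is kept unchanged; between the two
# thresholds a low-confidence "14 p0 0 0 1 1" entry is appended; above
# high_threshold the whole PredictionString is replaced with NORMAL.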
NORMAL = "14 1 0 0 1 1"
pred_det_df = pd.read_csv(".. /input/vinbigdatastack/submission_postprocessed.csv")
n_normal_before = len(pred_det_df.query("PredictionString == @NORMAL"))
merged_df = pd.merge(pred_det_df, pred_2class, on="image_id", how="left")
if "target" in merged_df.columns:
merged_df["class0"] = 1 - merged_df["target"]
c0, c1, c2 = 0, 0, 0
for i in range(len(merged_df)) :
p0 = merged_df.loc[i, "class0"]
if p0 < low_threshold:
c0 += 1
elif low_threshold <= p0 and p0 < high_threshold:
merged_df.loc[i, "PredictionString"] += f" 14 {p0} 0 0 1 1"
c1 += 1
else:
merged_df.loc[i, "PredictionString"] = NORMAL
c2 += 1<save_to_csv>
|
path.ls()
|
Digit Recognizer
|
13,720,425 |
n_normal_after = len(merged_df.query("PredictionString == @NORMAL"))
print(
f"n_normal: {n_normal_before} -> {n_normal_after} with threshold {low_threshold} & {high_threshold}"
)
print(f"Keep {c0} Add {c1} Replace {c2}")
submission_filepath = str("submission.csv")
submission_df = merged_df[["image_id", "PredictionString"]]
submission_df.to_csv(submission_filepath, index=False)
print(f"Saved to {submission_filepath}" )<install_modules>
|
df = pd.read_csv(path/'train.csv', low_memory=False)
df_test = pd.read_csv(path/'test.csv', low_memory=False )
|
Digit Recognizer
|
13,720,425 |
!pip install -U ensemble-boxes<import_modules>
|
path_train = Path(".. /train")
path_test = Path(".. /test" )
|
Digit Recognizer
|
13,720,425 |
import pandas as pd
import numpy as np
from ensemble_boxes import *
from glob import glob
import copy
from tqdm import tqdm
import shutil<load_from_csv>
|
def saveDigit(digit_row, filepath):
digit = digit_row.reshape(28,28)
digit = digit.astype(np.uint8)
img = Image.fromarray(digit)
img.save(filepath )
|
Digit Recognizer
|
13,720,425 |
height_dict = pd.read_csv('../input/vinbigdata-original-image-dataset/vinbigdata/test.csv').to_dict('records')
fnl_dict ={}
for ix,i in enumerate(height_dict):
fnl_dict[i['image_id']] = [i['width'],i['height'],i['width'],i['height']]<load_from_csv>
|
for index, row in df.iterrows() :
filePath = f'../train/{row[0]}/{index}.jpg'
saveDigit(row[1:].values, filePath)
|
Digit Recognizer
|
13,720,425 |
subs = [
pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_1.csv'),
pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_2.csv'),
pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_3.csv'),
pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_4.csv'),
pd.read_csv('../input/yolo-vbd-lots-of-decimals/Fold_5.csv')
]
pred_2cls = pd.read_csv('../input/vinbigdata-2-class-classifier-complete-pipeline/results/tmp_debug/test_pred.csv')<categorify>
|
for index, row in df_test.iterrows() :
filePath = f'../test/{index+1}.jpg'
saveDigit(row.values, filePath )
|
Digit Recognizer
|
13,720,425 |
def submission_decoder(df:pd.DataFrame)-> np.ndarray:
info = df.values
df_lst = []
for i in info:
pre_lst = i[1].split(' ')
for j in range(0,len(pre_lst),6):
df_lst.append([i[0],int(pre_lst[j]),float(pre_lst[j+1]),int(pre_lst[j+2]),int(pre_lst[j+3]),\
int(pre_lst[j+4]),int(pre_lst[j+5]),fnl_dict.get(i[0])[0],fnl_dict.get(i[0])[1]])
return pd.DataFrame(df_lst,columns = ['image_id','class_id','score','x_min','y_min','x_max','y_max','width','height'] )<categorify>
|
imgs = get_image_files(path_train )
|
Digit Recognizer
|
13,720,425 |
subs = [submission_decoder(subs[i])for i in range(len(subs)) ]<count_unique_values>
|
Image.open(imgs[0] )
|
Digit Recognizer
|
13,720,425 |
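# Collect per-image boxes, scores and labels from each fold's submission; boxes are
# divided by [width, height, width, height] so they fall in [0, 1], the normalised
# coordinate format expected by weighted_boxes_fusion.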
boxes_dict = {}
scores_dict = {}
labels_dict = {}
whwh_dict = {}
for i in tqdm(subs[0].image_id.unique()):
if not i in boxes_dict.keys() :
boxes_dict[i] = []
scores_dict[i] = []
labels_dict[i] = []
whwh_dict[i] = []
size_ratio = fnl_dict.get(i)
whwh_dict[i].append(size_ratio)
tmp_df = [subs[x][subs[x]['image_id']==i] for x in range(len(subs)) ]
for x in range(len(tmp_df)) :
boxes_dict[i].append(((tmp_df[x][['x_min','y_min','x_max','y_max']].values)/size_ratio ).tolist())
scores_dict[i].append(tmp_df[x]['score'].values.tolist())
labels_dict[i].append(tmp_df[x]['class_id'].values.tolist() )<compute_train_metric>
|
dls = db.dataloaders(path_train)
dls.show_batch()
|
Digit Recognizer
|
13,720,425 |
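# Fuse the per-fold predictions with Weighted Boxes Fusion, then scale the fused
# boxes back to pixel coordinates by multiplying with [width, height, width, height].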
weights = [1]*5
weights1 = [3,2,4,5]
iou_thr = 0.5
skip_box_thr = 0.0001
sigma = 0.1
fnl = {}
for i in tqdm(boxes_dict.keys()):
boxes, scores, labels = weighted_boxes_fusion(boxes_dict[i], scores_dict[i], labels_dict[i],\
weights=weights, iou_thr=iou_thr, skip_box_thr=skip_box_thr)
if not i in fnl.keys() :
fnl[i] = {'boxes':[],'scores':[],'labels':[]}
fnl[i]['boxes'] = boxes*whwh_dict[i]
fnl[i]['scores'] = scores
fnl[i]['labels'] = labels<remove_duplicates>
|
xb,yb = dls.one_batch()
xb.shape,yb.shape
|
Digit Recognizer
|
13,720,425 |
pd_form = []
for i in fnl.keys() :
b = fnl[i]
for j in range(len(b['boxes'])) :
pd_form.append([i,int(b['labels'][j]),round(b['scores'][j],2),\
int(b['boxes'][j][0]),int(b['boxes'][j][1]),\
int(b['boxes'][j][2]),int(b['boxes'][j][3])])
final_df = pd.DataFrame(pd_form,columns = ['image_id','class_id','score','x_min','y_min','x_max','y_max'])
final_df = final_df.drop_duplicates(keep = 'first' )<categorify>
|
dls.bs = 32
|
Digit Recognizer
|
13,720,425 |
def submission_encoder(df:pd.DataFrame)-> np.ndarray:
dct = {}
for i in tqdm(df['image_id'].unique()):
if not i in dct.keys() :
dct[i] = []
tmp = df[df['image_id'] == i].values
for j in tmp:
dct[i].append(int(j[1]))
dct[i].append(float(j[2]))
dct[i].append(int(j[3]))
dct[i].append(int(j[4]))
dct[i].append(int(j[5]))
dct[i].append(int(j[6]))
dct[i] = map(str,dct[i])
dct[i] = ' '.join(dct[i])
dct = [[k, v] for k, v in dct.items() ]
return pd.DataFrame(dct,columns = ['image_id','PredictionString'] ).reset_index(drop = True)
df = submission_encoder(final_df)
df.to_csv('Fold5Yolo.csv', index=False )<merge>
|
learn = cnn_learner(dls, resnet50, metrics=accuracy ).to_fp16()
|
Digit Recognizer
|
13,720,425 |
NORMAL = "14 1 0 0 1 1"
low_threshold = 0.00
high_threshold = 0.99
pred_det_df = df
n_normal_before = len(pred_det_df.query("PredictionString == @NORMAL"))
merged_df = pd.merge(pred_det_df, pred_2cls, on="image_id", how="left")
if "target" in merged_df.columns:
merged_df["class0"] = 1 - merged_df["target"]
c0, c1, c2 = 0, 0, 0
for i in range(len(merged_df)) :
p0 = merged_df.loc[i, "class0"]
if p0 < low_threshold:
c0 += 1
elif low_threshold <= p0 and p0 < high_threshold:
if ' 14 ' not in merged_df.loc[i, "PredictionString"]:
merged_df.loc[i, "PredictionString"] += f" 14 {p0} 0 0 1 1"
c1 += 1
else:
merged_df.loc[i, "PredictionString"] = NORMAL
c2 += 1
n_normal_after = len(merged_df.query("PredictionString == @NORMAL"))
print(
f"n_normal: {n_normal_before} -> {n_normal_after} with threshold {low_threshold} & {high_threshold}"
)
print(f"Keep {c0} Add {c1} Replace {c2}")
submission_filepath = str("submission.csv")
submission_df = merged_df[["image_id", "PredictionString"]]
submission_df.to_csv(submission_filepath, index=False)
print(f"Saved to {submission_filepath}" )<set_options>
|
learn.fine_tune(6, freeze_epochs=3 )
|
Digit Recognizer
|
13,720,425 |
py.init_notebook_mode(connected=True)
pio.templates.default = "plotly_dark"
pd.set_option('max_columns', 50)
<install_modules>
|
test_dl = dls.test_dl(get_image_files(path_test))
class_score, y = learn.get_preds(dl=test_dl )
|
Digit Recognizer
|
13,720,425 |
!pip install detectron2 -f \
https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.7/index.html
!pip install pytorch-pfn-extras timm<load_pretrained>
|
class_score = np.argmax(class_score, axis=1)
|
Digit Recognizer
|
13,720,425 |
def save_yaml(filepath: str, content: Any, width: int = 120):
with open(filepath, "w")as f:
yaml.dump(content, f, width=width )<init_hyperparams>
|
predicted_classes = [dls.vocab[i] for i in class_score]
predicted_classes[:10]
|
Digit Recognizer
|
13,720,425 |
<init_hyperparams><EOS>
|
output = pd.DataFrame({'ImageId': image_id_list, 'Label': predicted_classes})
output.ImageId = output.ImageId.astype(int)
output = output.sort_values(by='ImageId', ignore_index=True)
output.to_csv('submission.csv', index=False)
output.head()
|
Digit Recognizer
|
13,783,879 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
|
%matplotlib inline
np.random.seed(2)
sns.set(style = 'white', context= 'notebook', palette = 'deep' )
|
Digit Recognizer
|
13,783,879 |
print("torch", torch.__version__)
flags = Flags().update(flags_dict)
print("flags", flags)
debug = flags.debug
outdir = Path(flags.outdir)
os.makedirs(str(outdir), exist_ok=True)
flags_dict = dataclasses.asdict(flags)
save_yaml(str(outdir / "flags.yaml"), flags_dict)
inputdir = Path("/kaggle/input")
datadir = inputdir / "vinbigdata-chest-xray-abnormalities-detection"
imgdir = inputdir / flags.imgdir_name
train = pd.read_csv(datadir / "train.csv")
<groupby>
|
train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
|
Digit Recognizer
|
13,783,879 |
is_normal_df = train.groupby("image_id")["class_id"].agg(lambda s:(s == 14 ).sum() ).reset_index().rename({"class_id": "num_normal_annotations"}, axis=1)
is_normal_df.head()<categorify>
|
Y_train = train['label']
X_train = train.drop(labels = ['label'], axis = 1)
del train
g = sns.countplot(Y_train)
g
|
Digit Recognizer
|
13,783,879 |
num_normal_anno_counts_df = num_normal_anno_counts.reset_index()
num_normal_anno_counts_df["name"] = num_normal_anno_counts_df["index"].map({0: "Abnormal", 3: "Normal"})
num_normal_anno_counts_df<define_variables>
|
X_train = X_train/255.0
test = test/255.0
|
Digit Recognizer
|
13,783,879 |
def get_vinbigdata_dicts(
imgdir: Path,
train_df: pd.DataFrame,
train_data_type: str = "original",
use_cache: bool = True,
debug: bool = True,
target_indices: Optional[np.ndarray] = None,
):
debug_str = f"_debug{int(debug)}"
train_data_type_str = f"_{train_data_type}"
cache_path = Path(".")/ f"dataset_dicts_cache{train_data_type_str}{debug_str}.pkl"
if not use_cache or not cache_path.exists() :
print("Creating data...")
train_meta = pd.read_csv(imgdir / "train_meta.csv")
if debug:
train_meta = train_meta.iloc[:500]
image_id = train_meta.loc[0, "image_id"]
image_path = str(imgdir / "train" / f"{image_id}.png")
image = cv2.imread(image_path)
resized_height, resized_width, ch = image.shape
print(f"image shape: {image.shape}")
dataset_dicts = []
for index, train_meta_row in tqdm(train_meta.iterrows() , total=len(train_meta)) :
record = {}
image_id, height, width = train_meta_row.values
filename = str(imgdir / "train" / f"{image_id}.png")
record["file_name"] = filename
record["image_id"] = image_id
record["height"] = resized_height
record["width"] = resized_width
objs = []
for index2, row in train_df.query("image_id == @image_id" ).iterrows() :
class_id = row["class_id"]
if class_id == 14:
pass
else:
h_ratio = resized_height / height
w_ratio = resized_width / width
bbox_resized = [
int(row["x_min"])* w_ratio,
int(row["y_min"])* h_ratio,
int(row["x_max"])* w_ratio,
int(row["y_max"])* h_ratio,
]
obj = {
"bbox": bbox_resized,
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": class_id,
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
with open(cache_path, mode="wb")as f:
pickle.dump(dataset_dicts, f)
print(f"Load from cache {cache_path}")
with open(cache_path, mode="rb")as f:
dataset_dicts = pickle.load(f)
if target_indices is not None:
dataset_dicts = [dataset_dicts[i] for i in target_indices]
return dataset_dicts
def get_vinbigdata_dicts_test(
imgdir: Path, test_meta: pd.DataFrame, use_cache: bool = True, debug: bool = True,
):
debug_str = f"_debug{int(debug)}"
cache_path = Path(".")/ f"dataset_dicts_cache_test{debug_str}.pkl"
if not use_cache or not cache_path.exists() :
print("Creating data...")
if debug:
test_meta = test_meta.iloc[:500]
image_id = test_meta.loc[0, "image_id"]
image_path = str(imgdir / "test" / f"{image_id}.png")
image = cv2.imread(image_path)
resized_height, resized_width, ch = image.shape
print(f"image shape: {image.shape}")
dataset_dicts = []
for index, test_meta_row in tqdm(test_meta.iterrows() , total=len(test_meta)) :
record = {}
image_id, height, width = test_meta_row.values
filename = str(imgdir / "test" / f"{image_id}.png")
record["file_name"] = filename
record["image_id"] = image_id
record["height"] = resized_height
record["width"] = resized_width
dataset_dicts.append(record)
with open(cache_path, mode="wb")as f:
pickle.dump(dataset_dicts, f)
print(f"Load from cache {cache_path}")
with open(cache_path, mode="rb")as f:
dataset_dicts = pickle.load(f)
return dataset_dicts
<categorify>
|
X_train = X_train.values.reshape(-1,28,28,1)
test= test.values.reshape(-1,28,28,1 )
|
Digit Recognizer
|
13,783,879 |
class DatasetMixin(Dataset):
def __init__(self, transform=None):
self.transform = transform
def __getitem__(self, index):
if torch.is_tensor(index):
index = index.tolist()
if isinstance(index, slice):
current, stop, step = index.indices(len(self))
return [self.get_example_wrapper(i)for i in
six.moves.range(current, stop, step)]
elif isinstance(index, list)or isinstance(index, numpy.ndarray):
return [self.get_example_wrapper(i)for i in index]
else:
return self.get_example_wrapper(index)
def __len__(self):
raise NotImplementedError
def get_example_wrapper(self, i):
example = self.get_example(i)
if self.transform:
example = self.transform(example)
return example
def get_example(self, i):
raise NotImplementedError
<categorify>
|
Y_train = to_categorical(Y_train,num_classes= 10 )
|
Digit Recognizer
|
13,783,879 |
class VinbigdataTwoClassDataset(DatasetMixin):
def __init__(self, dataset_dicts, image_transform=None, transform=None, train: bool = True,
mixup_prob: float = -1.0, label_smoothing: float = 0.0):
super(VinbigdataTwoClassDataset, self ).__init__(transform=transform)
self.dataset_dicts = dataset_dicts
self.image_transform = image_transform
self.train = train
self.mixup_prob = mixup_prob
self.label_smoothing = label_smoothing
def _get_single_example(self, i):
d = self.dataset_dicts[i]
filename = d["file_name"]
img = cv2.imread(filename)
if self.image_transform:
img = self.image_transform(img)
img = torch.tensor(np.transpose(img,(2, 0, 1)).astype(np.float32))
if self.train:
label = int(len(d["annotations"])> 0)
if self.label_smoothing > 0:
if label == 0:
return img, float(label)+ self.label_smoothing
else:
return img, float(label)- self.label_smoothing
else:
return img, float(label)
else:
return img, None
def get_example(self, i):
img, label = self._get_single_example(i)
if self.mixup_prob > 0.0 and np.random.uniform() < self.mixup_prob:
j = np.random.randint(0, len(self.dataset_dicts))
p = np.random.uniform()
img2, label2 = self._get_single_example(j)
img = img * p + img2 *(1 - p)
if self.train:
label = label * p + label2 *(1 - p)
if self.train:
label_logit = torch.tensor([1 - label, label], dtype=torch.float32)
return img, label_logit
else:
return img
def __len__(self):
return len(self.dataset_dicts )<create_dataframe>
|
random_seed = 2
|
Digit Recognizer
|
13,783,879 |
dataset_dicts = get_vinbigdata_dicts(imgdir, train, debug=debug)
dataset = VinbigdataTwoClassDataset(dataset_dicts )<normalization>
|
X_train, X_val, Y_train, Y_val = \
train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed )
|
Digit Recognizer
|
13,783,879 |
class Transform:
def __init__(
self, hflip_prob: float = 0.5, ssr_prob: float = 0.5, random_bc_prob: float = 0.5
):
self.transform = A.Compose(
[
A.HorizontalFlip(p=hflip_prob),
A.ShiftScaleRotate(
shift_limit=0.0625, scale_limit=0.1, rotate_limit=10, p=ssr_prob
),
A.RandomBrightnessContrast(p=random_bc_prob),
]
)
def __call__(self, image):
image = self.transform(image=image)["image"]
return image
<create_dataframe>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(BatchNormalization())
model.add(AveragePooling2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(AveragePooling2D(pool_size=(2,2), strides=(2,2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.25))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
13,783,879 |
aug_dataset = VinbigdataTwoClassDataset(dataset_dicts, image_transform=Transform() )<categorify>
|
optimizer = Adam(
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
patience=3,
verbose=1,
factor=5e-7,
min_lr=0.00001 )
|
Digit Recognizer
|
13,783,879 |
class Transform:
def __init__(self, aug_kwargs: Dict):
self.transform = A.Compose(
[getattr(A, name )(**kwargs)for name, kwargs in aug_kwargs.items() ]
)
def __call__(self, image):
image = self.transform(image=image)["image"]
return image<init_hyperparams>
|
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
epochs = 20
batch_size = 128
|
Digit Recognizer
|
13,783,879 |
class CNNFixedPredictor(nn.Module):
def __init__(self, cnn: nn.Module, num_classes: int = 2):
super(CNNFixedPredictor, self ).__init__()
self.cnn = cnn
self.lin = Linear(cnn.num_features, num_classes)
print("cnn.num_features", cnn.num_features)
for param in self.cnn.parameters() :
param.requires_grad = False
def forward(self, x):
feat = self.cnn(x)
return self.lin(feat)
<choose_model_class>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
13,783,879 |
def build_predictor(model_name: str, model_mode: str = "normal"):
if model_mode == "normal":
return timm.create_model(model_name, pretrained=True, num_classes=2, in_chans=3)
elif model_mode == "cnn_fixed":
timm_model = timm.create_model(model_name, pretrained=True, num_classes=0, in_chans=3)
return CNNFixedPredictor(timm_model, num_classes=2)
else:
raise ValueError(f"[ERROR] Unexpected value model_mode={model_mode}")
<compute_test_metric>
|
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction]
)
|
Digit Recognizer
|
13,783,879 |
def accuracy(y: torch.Tensor, t: torch.Tensor)-> torch.Tensor:
assert y.shape[:-1] == t.shape, f"y {y.shape}, t {t.shape} is inconsistent."
pred_label = torch.max(y.detach() , dim=-1)[1]
count = t.nelement()
correct =(pred_label == t ).sum().float()
acc = correct / count
return acc
def accuracy_with_logits(y: torch.Tensor, t: torch.Tensor)-> torch.Tensor:
assert y.shape == t.shape
gt_label = torch.max(t.detach() , dim=-1)[1]
return accuracy(y, gt_label )<compute_train_metric>
|
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
13,783,879 |
<find_best_params><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False)
|
Digit Recognizer
|
13,570,098 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<find_best_params>
|
import numpy as np
import pandas as pd
|
Digit Recognizer
|
13,570,098 |
supported_models = timm.list_models()
print(f"{len(supported_models)} models are supported in timm.")
print(supported_models )<import_modules>
|
train_pd = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_pd = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
13,570,098 |
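# Exponential moving average (EMA) of model weights: step() updates a shadow copy of
# every trainable parameter, assign() temporarily swaps the shadow weights into the
# model (e.g. for evaluation), and resume() restores the original weights.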
class EMA(object):
def __init__(
self,
model: nn.Module,
decay: float,
strict: bool = True,
use_dynamic_decay: bool = True,
):
self.decay = decay
self.model = model
self.strict = strict
self.use_dynamic_decay = use_dynamic_decay
self.logger = getLogger(__name__)
self.n_step = 0
self.shadow = {}
self.original = {}
self._assigned = False
for name, param in model.named_parameters() :
if param.requires_grad:
self.shadow[name] = param.data.clone()
def step(self):
self.n_step += 1
if self.use_dynamic_decay:
_n_step = float(self.n_step)
decay = min(self.decay,(1.0 + _n_step)/(10.0 + _n_step))
else:
decay = self.decay
for name, param in self.model.named_parameters() :
if param.requires_grad:
assert name in self.shadow
new_average =(1.0 - decay)* param.data + decay * self.shadow[name]
self.shadow[name] = new_average.clone()
__call__ = step
def assign(self):
if self._assigned:
if self.strict:
raise ValueError("[ERROR] `assign` is called again before `resume`.")
else:
self.logger.warning(
"`assign` is called again before `resume`."
"shadow parameter is already assigned, skip."
)
return
for name, param in self.model.named_parameters() :
if param.requires_grad:
assert name in self.shadow
self.original[name] = param.data.clone()
param.data = self.shadow[name]
self._assigned = True
def resume(self):
if not self._assigned:
if self.strict:
raise ValueError("[ERROR] `resume` is called before `assign`.")
else:
self.logger.warning("`resume` is called before `assign`, skip.")
return
for name, param in self.model.named_parameters() :
if param.requires_grad:
assert name in self.shadow
param.data = self.original[name]
self._assigned = False
<set_options>
|
label_nums = train_pd["label"]
label = to_categorical(label_nums, num_classes = 10)
train = train_pd.drop(labels = ["label"],axis = 1)/ 255.0
test = test_pd / 255.0
(rows, cols, channels)=(28,28,1)
def ReShape(DF, rows, cols, channels):
return DF.values.reshape(DF.shape[0],rows,cols,channels)
train = ReShape(train, rows, cols, channels)
test = ReShape(test, rows, cols, channels)
X_train, X_val, Y_train, Y_val = train_test_split(train,label, test_size = 0.1)
print("X_train shape:" , X_train.shape, " Y_train shape:", Y_train.shape)
print("X_val shape:", X_val.shape, " Y_val shape:", Y_val.shape)
print("test shape:", test.shape )
|
Digit Recognizer
|
13,570,098 |
class LRScheduler(Extension):
trigger = 1, 'iteration'
priority = PRIORITY_READER
name = None
def __init__(self, optimizer: optim.Optimizer, scheduler_type: str, scheduler_kwargs: Mapping[str, Any])-> None:
super().__init__()
self.scheduler = getattr(optim.lr_scheduler, scheduler_type )(optimizer, **scheduler_kwargs)
def __call__(self, manager: ExtensionsManager)-> None:
self.scheduler.step()
def state_dict(self)-> None:
return self.scheduler.state_dict()
def load_state_dict(self, to_load)-> None:
self.scheduler.load_state_dict(to_load)
<train_model>
|
from keras.layers import Activation,Dropout,Dense,Conv2D,AveragePooling2D,MaxPooling2D,Flatten,ZeroPadding2D,BatchNormalization
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.losses import categorical_crossentropy
from keras.callbacks import EarlyStopping
|
Digit Recognizer
|
13,570,098 |
def create_trainer(model, optimizer, device)-> Engine:
model.to(device)
def update_fn(engine, batch):
model.train()
optimizer.zero_grad()
loss, metrics = model(*[elem.to(device)for elem in batch])
loss.backward()
optimizer.step()
return metrics
trainer = Engine(update_fn)
return trainer
<import_modules>
|
class LeNet5(Sequential):
def __init__(self, input_shape=(rows, cols, channels),activation='tanh',pooling='avg',dropout = 0, name="Base"):
super().__init__(name='LeNet5_'+name)
self.add(Conv2D(6, kernel_size=(5, 5), strides=(1, 1), activation=activation, input_shape=input_shape, padding="same"))
if pooling == 'avg':
self.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
else:
self.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
self.add(Conv2D(16, kernel_size=(5, 5), strides=(1, 1), activation=activation, padding='valid'))
if pooling == 'avg':
self.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
else:
self.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
if dropout > 0:
self.add(( Dropout(rate = dropout)))
self.add(Flatten())
self.add(Dense(120, activation=activation))
self.add(Dense(84, activation=activation))
self.add(Dense(10, activation='softmax'))
self.compile(optimizer='adam',
loss=categorical_crossentropy,
metrics=['accuracy'])
class LeNet5BN(Sequential):
def __init__(self, input_shape=(rows, cols, channels), name="ImprovedBN"):
super().__init__(name='LeNet5_'+name)
self.add(Conv2D(6, kernel_size=(5, 5), strides=(1, 1), input_shape=input_shape, padding="same"))
self.add(BatchNormalization())
self.add(Activation('relu'))
self.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
self.add(Conv2D(16, kernel_size=(5, 5), strides=(1, 1), padding='valid'))
self.add(BatchNormalization())
self.add(Activation('relu'))
self.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
self.add(Flatten())
self.add(Dense(120))
self.add(BatchNormalization())
self.add(Activation('relu'))
self.add(Dense(84))
self.add(BatchNormalization())
self.add(Activation('relu'))
self.add(Dense(10, activation='softmax'))
self.compile(optimizer='adam',
loss=categorical_crossentropy,
metrics=['accuracy'] )
|
Digit Recognizer
|
13,570,098 |
import dataclasses
import os
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import pytorch_pfn_extras.training.extensions as E
import torch
from ignite.engine import Events
from pytorch_pfn_extras.training import IgniteExtensionsManager
from sklearn.model_selection import StratifiedKFold
from torch import nn, optim
from torch.utils.data.dataloader import DataLoader<split>
|
early_stop = EarlyStopping(patience=5, monitor='val_accuracy', restore_best_weights=True)
model_LeNet5_base.fit(X_train, y=Y_train,
epochs=25, batch_size=128,
validation_data=(X_val, Y_val),
callbacks=[early_stop] )
|
Digit Recognizer
|
13,570,098 |
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=flags.seed)
y = np.array([int(len(d["annotations"])> 0)for d in dataset_dicts])
split_inds = list(skf.split(dataset_dicts, y))
train_inds, valid_inds = split_inds[flags.target_fold]
train_dataset = VinbigdataTwoClassDataset(
[dataset_dicts[i] for i in train_inds],
image_transform=Transform(flags.aug_kwargs),
mixup_prob=flags.mixup_prob,
label_smoothing=flags.label_smoothing,
)
valid_dataset = VinbigdataTwoClassDataset([dataset_dicts[i] for i in valid_inds])
<choose_model_class>
|
preds1 = model_LeNet5_base.evaluate(x = X_val, y = Y_val)
print("Loss = " + str(preds1[0]))
print("Val Accuracy = " + str(preds1[1]))
|
Digit Recognizer
|
13,570,098 |
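# Training setup: DataLoaders for the current fold, a timm backbone wrapped in
# Classifier, Adam over the trainable parameters, and an ignite trainer managed by
# pytorch-pfn-extras extensions (logging, progress bar, evaluator, LR scheduler,
# snapshots), with optional EMA weights saved alongside the regular snapshots.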
train_loader = DataLoader(
train_dataset,
batch_size=flags.batchsize,
num_workers=flags.num_workers,
shuffle=True,
pin_memory=True,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=flags.valid_batchsize,
num_workers=flags.num_workers,
shuffle=False,
pin_memory=True,
)
device = torch.device(flags.device)
predictor = build_predictor(model_name=flags.model_name, model_mode=flags.model_mode)
classifier = Classifier(predictor)
model = classifier
optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad], lr=1e-3)
trainer = create_trainer(model, optimizer, device)
ema = EMA(predictor, decay=flags.ema_decay)
def eval_func(*batch):
loss, metrics = model(*[elem.to(device)for elem in batch])
if flags.ema_decay > 0:
classifier.prefix = "ema_"
ema.assign()
loss, metrics = model(*[elem.to(device)for elem in batch])
ema.resume()
classifier.prefix = ""
valid_evaluator = E.Evaluator(
valid_loader, model, progress_bar=False, eval_func=eval_func, device=device
)
log_trigger =(1, "epoch")
log_report = E.LogReport(trigger=log_trigger)
extensions = [
log_report,
E.ProgressBarNotebook(update_interval=10 if debug else 100),
E.PrintReportNotebook() ,
E.FailOnNonNumber() ,
]
epoch = flags.epoch
models = {"main": model}
optimizers = {"main": optimizer}
manager = IgniteExtensionsManager(
trainer, models, optimizers, epoch, extensions=extensions, out_dir=str(outdir),
)
manager.extend(valid_evaluator)
manager.extend(
E.snapshot_object(predictor, "predictor.pt"), trigger=(flags.snapshot_freq, "epoch")
)
if flags.scheduler_type != "":
scheduler_type = flags.scheduler_type
print(f"using {scheduler_type} scheduler with kwargs {flags.scheduler_kwargs}")
manager.extend(
LRScheduler(optimizer, scheduler_type, flags.scheduler_kwargs),
trigger=flags.scheduler_trigger,
)
manager.extend(E.observe_lr(optimizer=optimizer), trigger=log_trigger)
if flags.ema_decay > 0:
manager.extend(lambda manager: ema() , trigger=(1, "iteration"))
def save_ema_model(manager):
ema.assign()
torch.save(predictor.state_dict() , outdir / "predictor_ema.pt")
ema.resume()
manager.extend(save_ema_model, trigger=(flags.snapshot_freq, "epoch"))
_ = trainer.run(train_loader, max_epochs=epoch )<save_to_csv>
|
model_LeNet5 = LeNet5BN(name="Improved_BN")
model_LeNet5.summary()
|
Digit Recognizer
|
13,570,098 |
torch.save(predictor.state_dict() , outdir / "predictor_last.pt")
df = log_report.to_dataframe()
df.to_csv(outdir / "log.csv", index=False)
df<save_to_csv>
|
early_stop = EarlyStopping(patience=5, monitor='val_accuracy', restore_best_weights=True)
model_LeNet5.fit(X_train, y=Y_train,
epochs=25, batch_size=128,
validation_data=(X_val, Y_val),
callbacks=[early_stop] )
|
Digit Recognizer
|
13,570,098 |
print("Training done! Start prediction...")
valid_pred = classifier.predict_proba(valid_loader ).cpu().numpy()
valid_pred_df = pd.DataFrame({
"image_id": [dataset_dicts[i]["image_id"] for i in valid_inds],
"class0": valid_pred[:, 0],
"class1": valid_pred[:, 1]
})
valid_pred_df.to_csv(outdir/"valid_pred.csv", index=False)
test_meta = pd.read_csv(inputdir / "vinbigdata-testmeta" / "test_meta.csv")
dataset_dicts_test = get_vinbigdata_dicts_test(imgdir, test_meta, debug=debug)
test_dataset = VinbigdataTwoClassDataset(dataset_dicts_test, train=False)
test_loader = DataLoader(
test_dataset,
batch_size=flags.valid_batchsize,
num_workers=flags.num_workers,
shuffle=False,
pin_memory=True,
)
test_pred = classifier.predict_proba(test_loader ).cpu().numpy()
test_pred_df = pd.DataFrame({
"image_id": [d["image_id"] for d in dataset_dicts_test],
"class0": test_pred[:, 0],
"class1": test_pred[:, 1]
})
test_pred_df.to_csv(outdir/"test_pred.csv", index=False )<load_from_csv>
|
preds2 = model_LeNet5.evaluate(x = X_val, y = Y_val)
print("Loss = " + str(preds2[0]))
print("Val Accuracy = " + str(preds2[1]))
|
Digit Recognizer
|
13,570,098 |
pred_2class = pd.read_csv(inputdir/"vinbigdata2classpred/test_pred.csv")
low_threshold = 0.0
high_threshold = 0.976
pred_2class<load_from_csv>
|
conf_mat, class_report, accuracy, faults = analyze_model(model_LeNet5, X_val, Y_val )
|
Digit Recognizer
|
13,570,098 |
NORMAL = "14 1 0 0 1 1"
pred_det_df = pd.read_csv(inputdir/"vinbigdata-detectron2-prediction/results/20210125_all_alb_aug_512_cos/submission.csv")
n_normal_before = len(pred_det_df.query("PredictionString == @NORMAL"))
merged_df = pd.merge(pred_det_df, pred_2class, on="image_id", how="left")
if "target" in merged_df.columns:
merged_df["class0"] = 1 - merged_df["target"]
c0, c1, c2 = 0, 0, 0
for i in range(len(merged_df)) :
p0 = merged_df.loc[i, "class0"]
if p0 < low_threshold:
c0 += 1
elif low_threshold <= p0 and p0 < high_threshold:
merged_df.loc[i, "PredictionString"] += f" 14 {p0} 0 0 1 1"
c1 += 1
else:
merged_df.loc[i, "PredictionString"] = NORMAL
c2 += 1
n_normal_after = len(merged_df.query("PredictionString == @NORMAL"))
print(
f"n_normal: {n_normal_before} -> {n_normal_after} with threshold {low_threshold} & {high_threshold}"
)
print(f"Keep {c0} Add {c1} Replace {c2}")
submission_filepath = str(outdir / "submission.csv")
submission_df = merged_df[["image_id", "PredictionString"]]
submission_df.to_csv(submission_filepath, index=False)
print(f"Saved to {submission_filepath}" )<import_modules>
|
show_faults(faults )
|
Digit Recognizer
|
13,570,098 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go<load_from_csv>
|
datagen = ImageDataGenerator(rotation_range=20,
width_shift_range=0.1,
shear_range=0.2,
zoom_range=0.1,
data_format="channels_last")
|
Digit Recognizer
|
13,570,098 |
pred_2class = pd.read_csv(".. /input/vinbigdata-2class-prediction/2-cls test pred.csv")
pred_2class<init_hyperparams>
|
X_train_aug = X_train
Y_train_aug = Y_train
print("Original train size: " , X_train_aug.shape)
target_size = 32200
batch_size = 200
digits_gen = datagen.flow(X_train, Y_train, batch_size=batch_size)
for i in range(target_size//batch_size):
batch = digits_gen.next()
X_train_aug = np.append(X_train_aug, batch[0], axis=0)
Y_train_aug = np.append(Y_train_aug, batch[1], axis=0)
print("Augmented train size: " , X_train_aug.shape)
|
Digit Recognizer
|
13,570,098 |
low_threshold = 0.005
high_threshold = 0.95<create_dataframe>
|
model_LeNet5_aug = LeNet5BN(name="Augmented_BN" )
|
Digit Recognizer
|
13,570,098 |
commits_df = pd.DataFrame(columns = ['n_commit', 'low', 'high', 'LB_score'] )<feature_engineering>
|
early_stop = EarlyStopping(patience=5, monitor='loss', restore_best_weights=True)
batch_size = 128
model_LeNet5_aug.fit(X_train_aug, Y_train_aug, batch_size=batch_size,
epochs=100,
validation_data=(X_val, Y_val),
callbacks=[early_stop])
|
Digit Recognizer
|
13,570,098 |
n=0
commits_df.loc[n, 'n_commit'] = 0
commits_df.loc[n, 'low'] = 0.001
commits_df.loc[n, 'high'] = 0.87
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
preds3 = model_LeNet5_aug.evaluate(x = X_val, y = Y_val)
print("Loss = " + str(preds3[0]))
print("Val Accuracy = " + str(preds3[1]))
|
Digit Recognizer
|
13,570,098 |
n=1
commits_df.loc[n, 'n_commit'] = n
commits_df.loc[n, 'low'] = 0.001
commits_df.loc[n, 'high'] = 0.90
commits_df.loc[n, 'LB_score'] = 0.244
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
conf_mat, class_report, accuracy, faults = analyze_model(model_LeNet5_aug, X_val, Y_val )
|
Digit Recognizer
|
13,570,098 |
n=2
commits_df.loc[n, 'n_commit'] = n
commits_df.loc[n, 'low'] = 0.001
commits_df.loc[n, 'high'] = 0.94
commits_df.loc[n, 'LB_score'] = 0.241
commits_df.loc[n, 'LB_private'] = 0.228<feature_engineering>
|
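# Build a second training set that pairs every misclassified sample with an equal
# number of randomly sampled correctly classified samples, then shuffle.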
y_pred = model_LeNet5_aug.predict(X_train_aug,batch_size=None, verbose=0)
mask = np.argmax(Y_train_aug, axis=1)== np.argmax(y_pred, axis=1)
x_hit = X_train_aug[mask]
y_hit = Y_train_aug[mask]
x_miss = X_train_aug[~mask]
y_miss = Y_train_aug[~mask]
num_misses = y_miss.shape[0]
sample_idx = sample(range(y_hit.shape[0]), y_miss.shape[0])
x_hit_reduced = x_hit[sample_idx]
y_hit_reduced = y_hit[sample_idx]
X_train2 = np.append(x_miss,x_hit_reduced, axis=0)
Y_train2 = np.append(y_miss,y_hit_reduced, axis=0)
idx_shuffle = list(range(X_train2.shape[0]))
np.random.shuffle(idx_shuffle)
X_train2 = X_train2[idx_shuffle]
Y_train2 = Y_train2[idx_shuffle]
|
Digit Recognizer
|
13,570,098 |
n=3
commits_df.loc[n, 'n_commit'] = n
commits_df.loc[n, 'low'] = 0.0
commits_df.loc[n, 'high'] = 0.91
commits_df.loc[n, 'LB_score'] = 0.243
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
model_LeNet5_2 = LeNet5BN(name="Net2" )
|
Digit Recognizer
|
13,570,098 |
n=4
commits_df.loc[n, 'n_commit'] = n
commits_df.loc[n, 'low'] = 0.002
commits_df.loc[n, 'high'] = 0.9
commits_df.loc[n, 'LB_score'] = 0.244
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
early_stop = EarlyStopping(patience=5, monitor='loss', restore_best_weights=True)
batch_size = 128
model_LeNet5_2.fit(X_train2, Y_train2,
epochs=100,
callbacks=[early_stop])
|
Digit Recognizer
|
13,570,098 |
n=5
commits_df.loc[n, 'n_commit'] = n
commits_df.loc[n, 'low'] = 0.005
commits_df.loc[n, 'high'] = 0.95
commits_df.loc[n, 'LB_score'] = 0.241
commits_df.loc[n, 'LB_private'] = 0.228<feature_engineering>
|
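# Train the third network only on samples where the first two models disagree,
# following the boosting idea used by boosted_lenet5() further below.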
y_pred1 = model_LeNet5_aug.predict(X_train_aug, batch_size=None, verbose=0)
y_pred2 = model_LeNet5_2.predict(X_train_aug, batch_size=None, verbose=0)
mask = np.argmax(y_pred1, axis=1)== np.argmax(y_pred2, axis=1)
X_train3 = X_train_aug[~mask]
Y_train3 = Y_train_aug[~mask]
|
Digit Recognizer
|
13,570,098 |
n=6
commits_df.loc[n, 'n_commit'] = n+1
commits_df.loc[n, 'low'] = 0.001
commits_df.loc[n, 'high'] = 0.9
commits_df.loc[n, 'LB_score'] = 0.244
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
model_LeNet5_3 = LeNet5BN(name="Net3" )
|
Digit Recognizer
|
13,570,098 |
n=7
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.001
commits_df.loc[n, 'high'] = 0.88
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
early_stop = EarlyStopping(patience=5, monitor='loss', restore_best_weights=True)
batch_size = 128
model_LeNet5_3.fit(X_train3, Y_train3,
epochs=100,
callbacks=[early_stop] )
|
Digit Recognizer
|
13,570,098 |
n=8
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.001
commits_df.loc[n, 'high'] = 0.86
commits_df.loc[n, 'LB_score'] = 0.245
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
def boosted_lenet5(x):
pred1 = model_LeNet5_aug.predict(x,batch_size=None, verbose=0)
pred2 = model_LeNet5_2.predict(x,batch_size=None, verbose=0)
pred3 = model_LeNet5_3.predict(x,batch_size=None, verbose=0)
final_pred = np.argmax(pred1+pred2+pred3, axis = 1)
return final_pred
|
Digit Recognizer
|
13,570,098 |
n=9
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.001
commits_df.loc[n, 'high'] = 0.875
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.225<feature_engineering>
|
final_pred = boosted_lenet5(X_val )
|
Digit Recognizer
|
13,570,098 |
n=10
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.0
commits_df.loc[n, 'high'] = 0.875
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.225<feature_engineering>
|
conf_mat, class_report, accuracy, faults = analyze_results("boosted LeNet5", final_pred, np.argmax(Y_val, axis=1), X_val)
|
Digit Recognizer
|
13,570,098 |
n=11
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.002
commits_df.loc[n, 'high'] = 0.885
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
show_faults(faults )
|
Digit Recognizer
|
13,570,098 |
n=12
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.003
commits_df.loc[n, 'high'] = 0.88
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
acc = [preds1[1], preds2[1], preds3[1], accuracy]
models = ['LeNet5 Base', 'Improved', 'Augmented', 'Boosted LeNet5']
tbl = {"Model": models, "Accuracies": acc}
tbl_df = pd.DataFrame(tbl)
tbl_df
|
Digit Recognizer
|
13,570,098 |
n=13
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.005
commits_df.loc[n, 'high'] = 0.88
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
test_label = boosted_lenet5(test)
result = {"ImageId":range(1,1+test_label.shape[0]), "Label":test_label}
submission = pd.DataFrame(result)
submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
13,515,461 |
n=14
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.01
commits_df.loc[n, 'high'] = 0.88
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.226<feature_engineering>
|
X_train = []
Y_train = []
with open('/kaggle/input/digit-recognizer/train.csv') as train_file:
for line in train_file.readlines()[1:]:
content = line.strip().split(',')
label, image_raw_data = content[0], content[1:]
image_np = np.array_split(np.array(image_raw_data), 28)
X_train.append(image_np)
Y_train.append(label)
X_train = np.array(X_train).astype(float)
X_train = np.expand_dims(X_train, axis=3)
Y_train = np.array(Y_train).astype(float)
print(X_train.shape)
print(Y_train.shape)
|
Digit Recognizer
|
13,515,461 |
n=15
commits_df.loc[n, 'n_commit'] = n+3
commits_df.loc[n, 'low'] = 0.1
commits_df.loc[n, 'high'] = 0.88
commits_df.loc[n, 'LB_score'] = 0.246
commits_df.loc[n, 'LB_private'] = 0.226<data_type_conversions>
|
X_test = []
with open('/kaggle/input/digit-recognizer/test.csv') as test_file:
for line in test_file.readlines()[1:]:
content = line.strip().split(',')
image_raw_data = content[:]
image_np = np.array_split(np.array(image_raw_data), 28)
X_test.append(image_np)
X_test = np.array(X_test).astype(float)
X_test = np.expand_dims(X_test, axis=3)
print(X_test.shape)
|
Digit Recognizer
|
13,515,461 |
commits_df['LB_score'] = pd.to_numeric(commits_df['LB_score'])
commits_df['LB_private'] = pd.to_numeric(commits_df['LB_private'])
commits_df = commits_df.sort_values(by=['LB_score'], ascending = False ).reset_index(drop=True)
commits_df['max'] = 0
commits_df.loc[15, 'max'] = 1
commits_df.loc[0, 'max'] = 2
commits_df['max_private'] = 0
commits_df.loc[0, 'max_private'] = 1
commits_df.loc[15, 'max_private'] = 2<sort_values>
|
!wget --recursive --no-parent 'https://github.com/google/fonts/raw/master/apache/opensans/OpenSans-Regular.ttf' -P /usr/local/lib/python3.6/dist-packages/matplotlib/mpl-data/fonts/ttf
!wget --recursive --no-parent 'https://github.com/google/fonts/raw/master/apache/opensans/OpenSans-Light.ttf' -P /usr/local/lib/python3.6/dist-packages/matplotlib/mpl-data/fonts/ttf
!wget --recursive --no-parent 'https://github.com/google/fonts/raw/master/apache/opensans/OpenSans-SemiBold.ttf' -P /usr/local/lib/python3.6/dist-packages/matplotlib/mpl-data/fonts/ttf
!wget --recursive --no-parent 'https://github.com/google/fonts/raw/master/apache/opensans/OpenSans-Bold.ttf' -P /usr/local/lib/python3.6/dist-packages/matplotlib/mpl-data/fonts/ttf
|
Digit Recognizer
|
13,515,461 |
commits_df.sort_values(by=['LB_score'], ascending = True )<sort_values>
|
model = tf.keras.models.Sequential([
tf.keras.Input(shape=(28, 28, 1)) ,
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(32,(5,5), activation='relu', kernel_initializer = 'he_uniform', padding="SAME"),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Conv2D(32,(5,5), activation='relu', kernel_initializer = 'he_uniform', padding="SAME"),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Conv2D(64,(3,3), activation='relu', kernel_initializer = 'he_uniform', padding="SAME"),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Conv2D(64,(3,3), activation='relu', kernel_initializer = 'he_uniform', padding="SAME"),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Conv2D(128,(3,3), activation='relu', kernel_initializer = 'he_uniform', padding="SAME"),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Conv2D(128,(3,3), activation='relu', kernel_initializer = 'he_uniform', padding="SAME"),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(512, activation='relu', kernel_initializer = 'he_uniform'),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dense(256, activation='relu', kernel_initializer = 'he_uniform'),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dense(128, activation='relu', kernel_initializer = 'he_uniform'),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dense(64, activation='relu', kernel_initializer = 'he_uniform'),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy']
)
model.summary()
early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor="accuracy", mode='max', patience=25)
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint('./best_model.hdf5', monitor='accuracy',
mode='max', verbose=1, save_best_only=True)
reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(monitor='accuracy', mode='max', patience=15,
verbose=1, factor=0.5, min_lr=0.0001)
history = model.fit(
X_train / 255,
tf.keras.utils.to_categorical(Y_train, num_classes=10),
epochs=1000,
shuffle=True,
callbacks=[early_stopping_callback, model_checkpoint_callback,reduce_lr_callback]
)
|
Digit Recognizer
|
13,515,461 |
<feature_engineering><EOS>
|
predictions = model.predict(X_test / 255)
y_pred = np.argmax(predictions, axis=-1)
submission_df = pd.DataFrame(data={
'ImageId': np.arange(1, X_test.shape[0] + 1),
'label': y_pred
})
submission_df.to_csv('submission.csv', index=False)
submission_df
|
Digit Recognizer
|
13,404,090 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
|
A = np.array([[1, 3, 5], [5, 4, 1], [3, 8, 6]])
print(A)
cov_matrix = np.cov(A, rowvar=False, bias=True)
cov_matrix
|
Digit Recognizer
|
13,404,090 |
warnings.filterwarnings("ignore" )<load_from_csv>
|
sns.set_style("dark")
|
Digit Recognizer
|
13,404,090 |
train_data = pd.read_csv(path+'train.csv')
samp_subm = pd.read_csv(path+'sample_submission.csv' )<define_variables>
|
train_df_org = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df_org = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print("train_df_org shape ", train_df_org.shape)
print("test_df_org shape ", test_df_org.shape)
train_df_org.head()
|
Digit Recognizer
|
13,404,090 |
print('Number train samples:', len(train_data.index))
print('Number test samples:', len(samp_subm.index))<load_from_csv>
|
standardized_data = StandardScaler().fit_transform(train_df_org)
print(standardized_data.shape )
|
Digit Recognizer
|
13,404,090 |
idnum = 2
image_id = train_data.loc[idnum, 'image_id']
data_file = dicom.dcmread(path+'train/'+image_id+'.dicom')
img = data_file.pixel_array<save_to_csv>
|
projected_vec = np.matmul(eigenvectors, sample_data.T)
projected_vec.shape
|
Digit Recognizer
|
13,404,090 |
samp_subm.to_csv('submission1.csv', index=False )<load_from_csv>
|
pca_dataframe = pd.DataFrame(data=projected_vec, columns=('1st_principal_comp', "2nd_principal_comp", 'labels'))
pca_dataframe.head()
|
Digit Recognizer
|
13,404,090 |
pred_2class = pd.read_csv(".. /input/vinbigdata-2class-prediction/2-cls test pred.csv")
low_threshold = 0.001
high_threshold = 0.87
pred_2class<load_from_csv>
|
pca = decomposition.PCA()
pca.n_components = 2
pca_data_with_scikit = pca.fit_transform(standardized_data)
pca_data_with_scikit.shape
|
Digit Recognizer
|
13,404,090 |
NORMAL = "14 1 0 0 1 1"
pred_det_df = pd.read_csv(".. /input/vinbigdatastack/submission_postprocessed.csv")
n_normal_before = len(pred_det_df.query("PredictionString == @NORMAL"))
merged_df = pd.merge(pred_det_df, pred_2class, on="image_id", how="left")
if "target" in merged_df.columns:
merged_df["class0"] = 1 - merged_df["target"]
c0, c1, c2 = 0, 0, 0
for i in range(len(merged_df)) :
p0 = merged_df.loc[i, "class0"]
if p0 < low_threshold:
c0 += 1
elif low_threshold <= p0 and p0 < high_threshold:
merged_df.loc[i, "PredictionString"] += f" 14 {p0} 0 0 1 1"
c1 += 1
else:
merged_df.loc[i, "PredictionString"] = NORMAL
c2 += 1
n_normal_after = len(merged_df.query("PredictionString == @NORMAL"))
print(
f"n_normal: {n_normal_before} -> {n_normal_after} with threshold {low_threshold} & {high_threshold}"
)
print(f"Keep {c0} Add {c1} Replace {c2}")
submission_filepath = str("submission2.csv")
submission_df = merged_df[["image_id", "PredictionString"]]
submission_df.to_csv(submission_filepath, index=False)
print(f"Saved to {submission_filepath}" )<define_variables>
|
df_PCA_scikit = pd.DataFrame(data=pca_data_with_scikit, columns=('f1_PC', 'f2_PC', 'labels'))
df_PCA_scikit.head()
|
Digit Recognizer
|
13,404,090 |
LABELS = ["isFraud"]<set_options>
|
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras import metrics
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Lambda, Flatten, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPool2D, AvgPool2D
from tensorflow.keras.optimizers import Adadelta
from keras.utils.np_utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import LearningRateScheduler
|
Digit Recognizer
|
13,404,090 |
%matplotlib inline
all_files = glob.glob(".. /input/lgmodels/*.csv")
all_files
<load_from_csv>
|
train_df_org = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
mnist_train_label = train_df_org.loc[:, "label"]
mnist_train_df = train_df_org.loc[:, "pixel0":]
|
Digit Recognizer
|
13,404,090 |
predict_list = []
predict_list.append(pd.read_csv('../input/lgmodels/Submission-.9433.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9451.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9459.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9463.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-0.9467.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/Submission-.9440.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9454.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-0.9466.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-0.9475.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-0.9433.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-0.9468.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9452.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/Submission-.9429.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9449.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9457.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/Submission-.9438.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/Submission-.9442.csv')[LABELS].values)
predict_list.append(pd.read_csv('../input/lgmodels/submission-.9469.csv')[LABELS].values)
<save_to_csv>
|
num_of_digit_classes = mnist_train_label_array.max() - mnist_train_label_array.min() + 1
mnist_train_label_array = to_categorical(mnist_train_label_array, num_classes=num_of_digit_classes)
print('Shape of ytrain after encoding and converting to categorical values ', mnist_train_label_array.shape)
print(mnist_train_label_array)
|
Digit Recognizer
|
13,404,090 |
warnings.filterwarnings("ignore")
print("Rank averaging on ", len(predict_list), " files")
predictions = np.zeros_like(predict_list[0])
for predict in predict_list:
for i in range(1):
predictions[:, i] = np.add(predictions[:, i], rankdata(predict[:, i])/predictions.shape[0])
predictions /= len(predict_list)
submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
submission[LABELS] = predictions
submission.to_csv('AggStacker.csv', index=False )<define_variables>
|
def run_model(input_shape=(28, 28, 1)) :
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape = input_shape))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size = 4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
def run_model_compilation(model, optimizer='adam', loss='categorical_crossentropy'):
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
def run_model_training(model, train, test, epochs, split):
history=model.fit(train, test, shuffle=True, epochs=epochs, validation_split=split)
return history
|
Digit Recognizer
|
13,404,090 |
sub_path = ".. /input/lgmodels/"
all_files = os.listdir(sub_path)
all_files<feature_engineering>
|
cnn_model = run_model(( 28,28, 1))
run_model_compilation(cnn_model, 'adam', 'categorical_crossentropy')
model_history = run_model_training(cnn_model, mnist_train_array, mnist_train_label_array, 100, 0.2 )
|
Digit Recognizer
|
13,404,090 |
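# Weighted geometric-mean blend: repeatedly pick the least-correlated pair of
# submission columns, weight earlier (less correlated) pairs more heavily with
# w = (m-n)/(m+n), average their log-predictions, and exponentiate the
# accumulated weighted sum at the end.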
rank = np.tril(concat_sub.iloc[:,1:].corr().values, -1)
m = (rank > 0).sum()
m_gmean, s = 0, 0
for n in range(min(rank.shape[0], m)):
mx = np.unravel_index(rank.argmin(), rank.shape)
w = (m-n)/(m+n)
print(w)
m_gmean += w*(np.log(concat_sub.iloc[:,mx[0]+1])+np.log(concat_sub.iloc[:,mx[1]+1]))/2
s += w
rank[mx] = 1
m_gmean = np.exp(m_gmean/s)<save_to_csv>
|
final_predictions = cnn_model.predict(mnist_test_array)
prediction_test_array = []
for i in final_predictions:
prediction_test_array.append(np.argmax(i))
|
Digit Recognizer
|
13,404,090 |
<save_to_csv><EOS>
|
submission = pd.DataFrame({
'ImageId': test_df_org.index+1,
'Label': prediction_test_array
})
submission.to_csv('final_submission.csv', index=False )
|
Digit Recognizer
|
13,324,252 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<import_modules>
|
%matplotlib inline
np.random.seed(2 )
|
Digit Recognizer
|
13,324,252 |
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from string import punctuation<import_modules>
|
test = pd.read_csv('../input/digit-recognizer/test.csv')
|
Digit Recognizer
|
13,324,252 |
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Input, LSTM, Embedding, Bidirectional
from tensorflow.keras import layers, utils, callbacks, optimizers, regularizers
from tqdm import tqdm
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau<define_variables>
|
( X_train, Y_train),(X_val, Y_val)= mnist.load_data()
|
Digit Recognizer
|
13,324,252 |
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')) )<load_from_csv>
|
X_train = X_train.reshape(-1, 28, 28, 1)
X_val = X_val.reshape(-1, 28, 28, 1)
X_train = X_train.astype('float32')
X_val = X_val.astype('float32')
test = test.values.reshape(-1, 28, 28, 1)
print(X_train.shape,', ',X_val.shape,', ', test.shape )
|
Digit Recognizer
|
13,324,252 |
train = pd.read_csv(".. /input/quora-question-pairs/train.csv.zip")
test = pd.read_csv(".. /input/quora-question-pairs/test.csv" )<drop_column>
|
Y_train = to_categorical(Y_train, num_classes = 10)
Y_val = to_categorical(Y_val, num_classes = 10 )
|
Digit Recognizer
|
13,324,252 |
def clean_dataframe_train(train):
stop_words = ['the','a','an','and','but','if','or','because','as','what','which','this','that','these','those','then',
'just','so','than','such','both','through','about','for','is','of','while','during','to','What','Which',
'Is','If','While','This']
def text_to_wordlist(text, remove_stop_words=True, stem_words=False):
text = re.sub(r"[^A-Za-z0-9]", " ", text)
text = re.sub(r"what's", "", text)
text = re.sub(r"What's", "", text)
text = re.sub(r"'s", " ", text)
text = re.sub(r"'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"I'm", "I am", text)
text = re.sub(r" m ", " am ", text)
text = re.sub(r"'re", " are ", text)
text = re.sub(r"'d", " would ", text)
text = re.sub(r"'ll", " will ", text)
text = re.sub(r"60k", " 60000 ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e-mail", "email", text)
text = re.sub(r"\s{2,}", " ", text)
text = re.sub(r"quikly", "quickly", text)
text = re.sub(r" usa ", " America ", text)
text = re.sub(r" USA ", " America ", text)
text = re.sub(r" u s ", " America ", text)
text = re.sub(r" uk ", " England ", text)
text = re.sub(r" UK ", " England ", text)
text = re.sub(r"india", "India", text)
text = re.sub(r"switzerland", "Switzerland", text)
text = re.sub(r"china", "China", text)
text = re.sub(r"chinese", "Chinese", text)
text = re.sub(r"imrovement", "improvement", text)
text = re.sub(r"intially", "initially", text)
text = re.sub(r"quora", "Quora", text)
text = re.sub(r" dms ", "direct messages ", text)
text = re.sub(r"demonitization", "demonetization", text)
text = re.sub(r"actived", "active", text)
text = re.sub(r"kms", " kilometers ", text)
text = re.sub(r"KMs", " kilometers ", text)
text = re.sub(r" cs ", " computer science ", text)
text = re.sub(r" upvotes ", " up votes ", text)
text = re.sub(r" iPhone ", " phone ", text)
text = re.sub(r"\0rs ", " rs ", text)
text = re.sub(r"calender", "calendar", text)
text = re.sub(r"ios", "operating system", text)
text = re.sub(r"gps", "GPS", text)
text = re.sub(r"gst", "GST", text)
text = re.sub(r"programing", "programming", text)
text = re.sub(r"bestfriend", "best friend", text)
text = re.sub(r"dna", "DNA", text)
text = re.sub(r"III", "3", text)
text = re.sub(r"the US", "America", text)
text = re.sub(r"Astrology", "astrology", text)
text = re.sub(r"Method", "method", text)
text = re.sub(r"Find", "find", text)
text = re.sub(r"banglore", "Banglore", text)
text = re.sub(r" J K ", " JK ", text)
text = ''.join([c for c in text if c not in punctuation])
if remove_stop_words:
text = text.split()
text = [w for w in text if not w in stop_words]
text = " ".join(text)
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word)for word in text]
text = " ".join(stemmed_words)
return(text)
def process_questions(question_list, questions, question_list_name, dataframe):
for question in questions:
question_list.append(text_to_wordlist(str(question)))
if len(question_list)% 100000 == 0:
progress = len(question_list)/len(dataframe)* 100
print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
train_question1 = []
process_questions(train_question1, train.question1, 'train_question1', train)
train_question2 = []
process_questions(train_question2, train.question2, 'train_question2', train)
train["question1"] = train_question1
train["question2"] = train_question2
return train<drop_column>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5, 5), padding = "Same", activation = "relu",
kernel_initializer = 'he_uniform', input_shape =(28, 28, 1)))
model.add(Conv2D(filters = 32, kernel_size =(5, 5), padding = "Same", activation = "relu",
kernel_initializer = 'he_uniform'))
model.add(MaxPooling2D(pool_size =(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3, 3), padding = "Same", activation = "relu",
kernel_initializer = 'he_uniform'))
model.add(Conv2D(filters = 64, kernel_size =(3, 3), padding = "Same", activation = "relu",
kernel_initializer = 'he_uniform'))
model.add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation="relu", kernel_initializer = 'he_uniform'))
model.add(Dropout(0.5))
model.add(Dense(units = 10, activation="softmax", kernel_initializer = 'he_uniform'))
|
Digit Recognizer
|
13,324,252 |
def clean_dataframe_test(test):
stop_words = ['the','a','an','and','but','if','or','because','as','what','which','this','that','these','those','then',
'just','so','than','such','both','through','about','for','is','of','while','during','to','What','Which',
'Is','If','While','This']
def text_to_wordlist(text, remove_stop_words=True, stem_words=False):
text = re.sub(r"[^A-Za-z0-9]", " ", text)
text = re.sub(r"what's", "", text)
text = re.sub(r"What's", "", text)
text = re.sub(r"'s", " ", text)
text = re.sub(r"'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"I'm", "I am", text)
text = re.sub(r" m ", " am ", text)
text = re.sub(r"'re", " are ", text)
text = re.sub(r"'d", " would ", text)
text = re.sub(r"'ll", " will ", text)
text = re.sub(r"60k", " 60000 ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e-mail", "email", text)
text = re.sub(r"\s{2,}", " ", text)
text = re.sub(r"quikly", "quickly", text)
text = re.sub(r" usa ", " America ", text)
text = re.sub(r" USA ", " America ", text)
text = re.sub(r" u s ", " America ", text)
text = re.sub(r" uk ", " England ", text)
text = re.sub(r" UK ", " England ", text)
text = re.sub(r"india", "India", text)
text = re.sub(r"switzerland", "Switzerland", text)
text = re.sub(r"china", "China", text)
text = re.sub(r"chinese", "Chinese", text)
text = re.sub(r"imrovement", "improvement", text)
text = re.sub(r"intially", "initially", text)
text = re.sub(r"quora", "Quora", text)
text = re.sub(r" dms ", "direct messages ", text)
text = re.sub(r"demonitization", "demonetization", text)
text = re.sub(r"actived", "active", text)
text = re.sub(r"kms", " kilometers ", text)
text = re.sub(r"KMs", " kilometers ", text)
text = re.sub(r" cs ", " computer science ", text)
text = re.sub(r" upvotes ", " up votes ", text)
text = re.sub(r" iPhone ", " phone ", text)
text = re.sub(r"\0rs ", " rs ", text)
text = re.sub(r"calender", "calendar", text)
text = re.sub(r"ios", "operating system", text)
text = re.sub(r"gps", "GPS", text)
text = re.sub(r"gst", "GST", text)
text = re.sub(r"programing", "programming", text)
text = re.sub(r"bestfriend", "best friend", text)
text = re.sub(r"dna", "DNA", text)
text = re.sub(r"III", "3", text)
text = re.sub(r"the US", "America", text)
text = re.sub(r"Astrology", "astrology", text)
text = re.sub(r"Method", "method", text)
text = re.sub(r"Find", "find", text)
text = re.sub(r"banglore", "Banglore", text)
text = re.sub(r" J K ", " JK ", text)
text = ''.join([c for c in text if c not in punctuation])
if remove_stop_words:
text = text.split()
text = [w for w in text if not w in stop_words]
text = " ".join(text)
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word)for word in text]
text = " ".join(stemmed_words)
return(text)
def process_questions(question_list, questions, question_list_name, dataframe):
for question in questions:
question_list.append(text_to_wordlist(str(question)))
if len(question_list)% 100000 == 0:
progress = len(question_list)/len(dataframe)* 100
print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
test_question1 = []
process_questions(test_question1, test.question1, 'test_question1', test)
test_question2 = []
process_questions(test_question2, test.question2, 'test_question2', test)
test["question1"] = test_question1
test["question2"] = test_question2
return test<create_dataframe>
|
optimizer = RMSprop(lr = 0.001, rho = 0.9, epsilon = 1e-08, decay = 0.0)
model.compile(optimizer = optimizer , loss = 'categorical_crossentropy', metrics = ['accuracy'] )
|
Digit Recognizer
|
13,324,252 |
train = clean_dataframe_train(train)
test = clean_dataframe_test(test )<define_variables>
|
checkpoint = ModelCheckpoint('./mod_best.hdf5',monitor = 'val_loss', mode = "min", verbose = 1, save_best_only = True)
earlystop = EarlyStopping(monitor = 'val_loss', patience = 3, min_delta = 0,verbose =1, restore_best_weights = True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
callbacks = [reduce_lr,checkpoint]
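A hedged sketch of how this callback list would typically be passed to Keras training; the batch size and epoch count below are illustrative placeholders, not values from the original kernel.
history = model.fit(X_train, Y_train,
                    batch_size=64,
                    epochs=30,
                    validation_data=(X_val, Y_val),
                    callbacks=callbacks )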
|
Digit Recognizer
|