kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57)
---|---|---|---|
6,925,490 |
class Classifier(nn.Module):
def __init__(self, predictor, lossfun=cross_entropy_with_logits):
super().__init__()
self.predictor = predictor
self.lossfun = lossfun
self.prefix = ""
def forward(self, image, targets):
outputs = self.predictor(image)
loss = self.lossfun(outputs, targets)
metrics = {
f"{self.prefix}loss": loss.item() ,
f"{self.prefix}acc": accuracy_with_logits(outputs, targets ).item()
}
ppe.reporting.report(metrics, self)
return loss, metrics
def predict(self, data_loader):
pred = self.predict_proba(data_loader)
label = torch.argmax(pred, dim=1)
return label
def predict_proba(self, data_loader):
        device: torch.device = next(self.parameters()).device
        y_list = []
        self.eval()
        with torch.no_grad():
            for batch in data_loader:
                if isinstance(batch, (tuple, list)):
batch = batch[0].to(device)
else:
batch = batch.to(device)
y = self.predictor(batch)
y = torch.softmax(y, dim=-1)
y_list.append(y)
pred = torch.cat(y_list)
return pred
<find_best_params>
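A minimal smoke test for the predict helpers above; a sketch only, assuming the Classifier cell has been run, with a stand-in backbone and random tensors that are not part of the original kernel:
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

backbone = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 2))  # hypothetical predictor
clf = Classifier(backbone)
loader = DataLoader(TensorDataset(torch.randn(16, 3, 32, 32)), batch_size=8)
proba = clf.predict_proba(loader)  # (16, 2) softmax probabilities
labels = clf.predict(loader)       # (16,) argmax class ids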
|
mean_px = train_x_sol5.mean().astype(np.float32)
std_px = train_x_sol5.std().astype(np.float32)
def standardize(x):
    return (x - mean_px) / std_px
|
Digit Recognizer
|
6,925,490 |
supported_models = timm.list_models()
print(f"{len(supported_models)} models are supported in timm.")
print(supported_models)<import_modules>
|
s5_train_x, s5_test_x, s5_train_y, s5_test_y = train_test_split(train_x_sol5, train_y_sol5,
test_size=0.2,
random_state=81)
ohe_s5_train_y = tf_utils.to_categorical(s5_train_y, 10)
ohe_s5_test_y = tf_utils.to_categorical(s5_test_y, 10)
train_batches_sol5 = image_augmentator.flow(s5_train_x, ohe_s5_train_y, batch_size=64)
val_batches_sol5 = image_augmentator.flow(s5_test_x, ohe_s5_test_y, batch_size=64)
|
Digit Recognizer
|
6,925,490 |
class EMA(object):
def __init__(
self,
model: nn.Module,
decay: float,
strict: bool = True,
use_dynamic_decay: bool = True,
):
self.decay = decay
self.model = model
self.strict = strict
self.use_dynamic_decay = use_dynamic_decay
self.logger = getLogger(__name__)
self.n_step = 0
self.shadow = {}
self.original = {}
self._assigned = False
        for name, param in model.named_parameters():
if param.requires_grad:
self.shadow[name] = param.data.clone()
def step(self):
self.n_step += 1
if self.use_dynamic_decay:
_n_step = float(self.n_step)
            decay = min(self.decay, (1.0 + _n_step) / (10.0 + _n_step))
else:
decay = self.decay
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                assert name in self.shadow
                new_average = (1.0 - decay) * param.data + decay * self.shadow[name]
self.shadow[name] = new_average.clone()
__call__ = step
def assign(self):
if self._assigned:
if self.strict:
raise ValueError("[ERROR] `assign` is called again before `resume`.")
else:
                self.logger.warning(
                    "`assign` is called again before `resume`. "
                    "shadow parameter is already assigned, skip."
                )
            return
        for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.shadow
self.original[name] = param.data.clone()
param.data = self.shadow[name]
self._assigned = True
def resume(self):
if not self._assigned:
if self.strict:
raise ValueError("[ERROR] `resume` is called before `assign`.")
else:
self.logger.warning("`resume` is called before `assign`, skip.")
return
        for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.shadow
param.data = self.original[name]
self._assigned = False
<set_options>
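A sketch of the intended call pattern for this EMA helper, assuming the cell above (and its logging import) has been run; the model, optimizer, and loop below are stand-ins, not from the kernel:
import torch
from torch import nn, optim

net = nn.Linear(4, 2)                      # stand-in model
opt = optim.SGD(net.parameters(), lr=0.1)
ema = EMA(net, decay=0.999)
for _ in range(3):                         # stand-in training loop
    loss = net(torch.randn(8, 4)).sum()
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema.step()                             # update the shadow weights after each step
ema.assign()                               # swap in the averaged weights...
# ... run validation here ...
ema.resume()                               # ...then restore the live ones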
|
model_sol_5 = Sequential()
model_sol_5.add(Lambda(standardize, input_shape=(28,28,1)))
model_sol_5.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu',
kernel_regularizer=regularizers.l2(0.1),
))
model_sol_5.add(BatchNormalization())
model_sol_5.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'
))
model_sol_5.add(MaxPooling2D(pool_size=2))
model_sol_5.add(BatchNormalization())
model_sol_5.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'
))
model_sol_5.add(MaxPooling2D(pool_size=2))
model_sol_5.add(Flatten())
model_sol_5.add(BatchNormalization())
model_sol_5.add(Dense(64))
model_sol_5.add(Activation('relu'))
model_sol_5.add(Dropout(0.2))
model_sol_5.add(BatchNormalization())
model_sol_5.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
6,925,490 |
class LRScheduler(Extension):
trigger = 1, 'iteration'
priority = PRIORITY_READER
name = None
    def __init__(self, optimizer: optim.Optimizer, scheduler_type: str, scheduler_kwargs: Mapping[str, Any]) -> None:
        super().__init__()
        self.scheduler = getattr(optim.lr_scheduler, scheduler_type)(optimizer, **scheduler_kwargs)
    def __call__(self, manager: ExtensionsManager) -> None:
        self.scheduler.step()
    def state_dict(self) -> dict:
        return self.scheduler.state_dict()
    def load_state_dict(self, to_load) -> None:
        self.scheduler.load_state_dict(to_load)
<train_model>
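This extension simply defers to whichever torch.optim.lr_scheduler class is named; the training cell further down wires it in via flags. As a self-contained illustration (scheduler name and kwargs here are hypothetical, not the kernel's flags):
# Assumes optimizer and manager from the training cell below.
manager.extend(
    LRScheduler(optimizer, "CosineAnnealingLR", {"T_max": 10}),
    trigger=(1, "iteration"),
)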
|
model_sol_5.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
|
Digit Recognizer
|
6,925,490 |
def create_trainer(model, optimizer, device) -> Engine:
    model.to(device)
    def update_fn(engine, batch):
        model.train()
        optimizer.zero_grad()
        loss, metrics = model(*[elem.to(device) for elem in batch])
loss.backward()
optimizer.step()
return metrics
trainer = Engine(update_fn)
return trainer
<import_modules>
|
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)
hist_sol_5 = model_sol_5.fit_generator(generator=train_batches_sol5, steps_per_epoch=s5_train_x.shape[0] // 64,
epochs=32, callbacks=[checkpointer],
                                       validation_data=val_batches_sol5, validation_steps=s5_test_x.shape[0] // 64, verbose=2)
|
Digit Recognizer
|
6,925,490 |
import dataclasses
import os
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import pytorch_pfn_extras.training.extensions as E
import torch
from ignite.engine import Events
from pytorch_pfn_extras.training import IgniteExtensionsManager
from sklearn.model_selection import StratifiedKFold
from torch import nn, optim
from torch.utils.data.dataloader import DataLoader<split>
|
model_sol_5.load_weights('mnist.model.best.hdf5')
score = model_sol_5.evaluate(s5_test_x, ohe_s5_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %.4f%%' % accuracy)
|
Digit Recognizer
|
6,925,490 |
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=flags.seed)
y = np.array([int(len(d["annotations"]) > 0) for d in dataset_dicts])
split_inds = list(skf.split(dataset_dicts, y))
train_inds, valid_inds = split_inds[flags.target_fold]
train_dataset = VinbigdataTwoClassDataset(
[dataset_dicts[i] for i in train_inds],
image_transform=Transform(flags.aug_kwargs),
mixup_prob=flags.mixup_prob,
label_smoothing=flags.label_smoothing,
)
valid_dataset = VinbigdataTwoClassDataset([dataset_dicts[i] for i in valid_inds])
<choose_model_class>
|
predictions = model_sol_5.predict(test_x_sol5)
predictions = [np.argmax(x) for x in predictions]
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission.drop('Label', axis=1, inplace=True)
submission['Label'] = predictions
submission.to_csv('submission5.csv', index=False)
|
Digit Recognizer
|
6,925,490 |
train_loader = DataLoader(
train_dataset,
batch_size=flags.batchsize,
num_workers=flags.num_workers,
shuffle=True,
pin_memory=True,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=flags.valid_batchsize,
num_workers=flags.num_workers,
shuffle=False,
pin_memory=True,
)
device = torch.device(flags.device)
predictor = build_predictor(model_name=flags.model_name, model_mode=flags.model_mode)
classifier = Classifier(predictor)
model = classifier
optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad], lr=1e-3)
trainer = create_trainer(model, optimizer, device)
ema = EMA(predictor, decay=flags.ema_decay)
def eval_func(*batch):
    loss, metrics = model(*[elem.to(device) for elem in batch])
    if flags.ema_decay > 0:
        classifier.prefix = "ema_"
        ema.assign()
        loss, metrics = model(*[elem.to(device) for elem in batch])
        ema.resume()
        classifier.prefix = ""
valid_evaluator = E.Evaluator(
valid_loader, model, progress_bar=False, eval_func=eval_func, device=device
)
log_trigger = (1, "epoch")
log_report = E.LogReport(trigger=log_trigger)
extensions = [
log_report,
E.ProgressBarNotebook(update_interval=10 if debug else 100),
    E.PrintReportNotebook(),
    E.FailOnNonNumber(),
]
epoch = flags.epoch
models = {"main": model}
optimizers = {"main": optimizer}
manager = IgniteExtensionsManager(
trainer, models, optimizers, epoch, extensions=extensions, out_dir=str(outdir),
)
manager.extend(valid_evaluator)
manager.extend(
E.snapshot_object(predictor, "predictor.pt"), trigger=(flags.snapshot_freq, "epoch")
)
if flags.scheduler_type != "":
scheduler_type = flags.scheduler_type
print(f"using {scheduler_type} scheduler with kwargs {flags.scheduler_kwargs}")
manager.extend(
LRScheduler(optimizer, scheduler_type, flags.scheduler_kwargs),
trigger=flags.scheduler_trigger,
)
manager.extend(E.observe_lr(optimizer=optimizer), trigger=log_trigger)
if flags.ema_decay > 0:
    manager.extend(lambda manager: ema(), trigger=(1, "iteration"))
    def save_ema_model(manager):
        ema.assign()
        torch.save(predictor.state_dict(), outdir / "predictor_ema.pt")
ema.resume()
manager.extend(save_ema_model, trigger=(flags.snapshot_freq, "epoch"))
_ = trainer.run(train_loader, max_epochs=epoch)<save_to_csv>
|
model_sol_6 = Sequential()
model_sol_6.add(Lambda(standardize, input_shape=(28,28,1)))
model_sol_6.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model_sol_6.add(BatchNormalization())
model_sol_6.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model_sol_6.add(MaxPooling2D(pool_size=2))
model_sol_6.add(Dropout(0.1))
model_sol_6.add(BatchNormalization())
model_sol_6.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
model_sol_6.add(MaxPooling2D(pool_size=2))
model_sol_6.add(Flatten())
model_sol_6.add(BatchNormalization())
model_sol_6.add(Dense(64))
model_sol_6.add(Activation('relu'))
model_sol_6.add(Dropout(0.2))
model_sol_6.add(BatchNormalization())
model_sol_6.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
6,925,490 |
torch.save(predictor.state_dict(), outdir / "predictor_last.pt")
df = log_report.to_dataframe()
df.to_csv(outdir / "log.csv", index=False)
df<save_to_csv>
|
model_sol_6.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
|
Digit Recognizer
|
6,925,490 |
print("Training done! Start prediction...")
valid_pred = classifier.predict_proba(valid_loader).cpu().numpy()
valid_pred_df = pd.DataFrame({
"image_id": [dataset_dicts[i]["image_id"] for i in valid_inds],
"class0": valid_pred[:, 0],
"class1": valid_pred[:, 1]
})
valid_pred_df.to_csv(outdir/"valid_pred.csv", index=False)
test_meta = pd.read_csv(inputdir / "vinbigdata-testmeta" / "test_meta.csv")
dataset_dicts_test = get_vinbigdata_dicts_test(imgdir, test_meta, debug=debug)
test_dataset = VinbigdataTwoClassDataset(dataset_dicts_test, train=False)
test_loader = DataLoader(
test_dataset,
batch_size=flags.valid_batchsize,
num_workers=flags.num_workers,
shuffle=False,
pin_memory=True,
)
test_pred = classifier.predict_proba(test_loader).cpu().numpy()
test_pred_df = pd.DataFrame({
"image_id": [d["image_id"] for d in dataset_dicts_test],
"class0": test_pred[:, 0],
"class1": test_pred[:, 1]
})
test_pred_df.to_csv(outdir / "test_pred.csv", index=False)<load_from_csv>
|
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True)
hist_sol_6 = model_sol_6.fit_generator(generator=train_batches_sol5, steps_per_epoch=s5_train_x.shape[0] // 64,
epochs=32, callbacks=[checkpointer],
                                       validation_data=val_batches_sol5, validation_steps=s5_test_x.shape[0] // 64, verbose=2)
|
Digit Recognizer
|
6,925,490 |
pred_2class = pd.read_csv(inputdir/"vinbigdata2classpred/test_pred.csv")
low_threshold = 0.0
high_threshold = 0.976
pred_2class<load_from_csv>
|
model_sol_6.load_weights('mnist.model.best.hdf5')
score = model_sol_6.evaluate(s5_test_x, ohe_s5_test_y, verbose=0)
accuracy = 100 * score[1]
print('Test accuracy: %.4f%%' % accuracy)
|
Digit Recognizer
|
6,925,490 |
NORMAL = "14 1 0 0 1 1"
pred_det_df = pd.read_csv(inputdir/"vinbigdata-detectron2-prediction/results/20210125_all_alb_aug_512_cos/submission.csv")
n_normal_before = len(pred_det_df.query("PredictionString == @NORMAL"))
merged_df = pd.merge(pred_det_df, pred_2class, on="image_id", how="left")
if "target" in merged_df.columns:
merged_df["class0"] = 1 - merged_df["target"]
c0, c1, c2 = 0, 0, 0
for i in range(len(merged_df)):
p0 = merged_df.loc[i, "class0"]
if p0 < low_threshold:
c0 += 1
elif low_threshold <= p0 and p0 < high_threshold:
merged_df.loc[i, "PredictionString"] += f" 14 {p0} 0 0 1 1"
c1 += 1
else:
merged_df.loc[i, "PredictionString"] = NORMAL
c2 += 1
n_normal_after = len(merged_df.query("PredictionString == @NORMAL"))
print(
f"n_normal: {n_normal_before} -> {n_normal_after} with threshold {low_threshold} & {high_threshold}"
)
print(f"Keep {c0} Add {c1} Replace {c2}")
submission_filepath = str(outdir / "submission.csv")
submission_df = merged_df[["image_id", "PredictionString"]]
submission_df.to_csv(submission_filepath, index=False)
print(f"Saved to {submission_filepath}" )<load_from_csv>
|
model_sol_6.optimizer.learning_rate = 0.01
gen = ImageDataGenerator()
batches = gen.flow(train_x_sol5, tf_utils.to_categorical(train_y_sol5, 10), batch_size=64)
hist_sol_6 = model_sol_6.fit_generator(generator=batches, steps_per_epoch=train_x_sol5.shape[0] // 64,
epochs=50, verbose=2)
|
Digit Recognizer
|
6,925,490 |
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train<load_from_csv>
|
predictions = model_sol_6.predict(test_x_sol5)
predictions = [np.argmax(x) for x in predictions]
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission.drop('Label', axis=1, inplace=True)
submission['Label'] = predictions
submission.to_csv('submission6.csv', index=False)
|
Digit Recognizer
|
6,925,490 |
submission=pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
print(train.columns)
print(test.shape)
train.drop(['Name', 'Ticket', 'Fare', 'Embarked'], axis=1, inplace=True)
test.drop(['Name', 'Ticket', 'Fare', 'Embarked'], axis=1, inplace=True)
train['Sex'].replace({'male':0, 'female':1}, inplace=True)
test['Sex'].replace({'male':0, 'female':1}, inplace=True)
train.drop(['Cabin'], axis=1,inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)<prepare_x_and_y>
|
os.remove('submission1.csv')
os.remove('submission2.csv')
os.remove('submission3.csv')
os.remove('submission4.csv')
os.remove('submission5.csv')
os.remove('submission6.csv')
|
Digit Recognizer
|
6,925,490 |
X = train.drop(['Survived', 'PassengerId'], axis=1)
y = train['Survived']
X_test = test.drop(['PassengerId'], axis=1)
<train_model>
|
final_train_x = train_x[..., tf.newaxis]
final_ohe_train_y = tf_utils.to_categorical(train_y, 10)
final_train_batches = image_augmentator.flow(final_train_x, final_ohe_train_y, batch_size=64)
final_model = Sequential()
final_model.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', input_shape=extended_splitted_train_X.shape[1:]))
final_model.add(BatchNormalization())
final_model.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
final_model.add(MaxPooling2D(pool_size=2))
final_model.add(Dropout(0.1))
final_model.add(BatchNormalization())
final_model.add(Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
final_model.add(MaxPooling2D(pool_size=2))
final_model.add(Flatten())
final_model.add(BatchNormalization())
final_model.add(Dense(64))
final_model.add(Activation('relu'))
final_model.add(Dropout(0.2))
final_model.add(BatchNormalization())
final_model.add(Dense(10, activation='softmax'))
final_model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
final_model.fit_generator(generator=final_train_batches, steps_per_epoch=final_train_batches.n // 64,  # n samples / batch size of 64
                          epochs=1, verbose=1)
|
Digit Recognizer
|
6,925,490 |
<save_to_csv><EOS>
|
predictions = final_model.predict(test_x[..., tf.newaxis])
predictions = [np.argmax(x) for x in predictions]
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission.drop('Label', axis=1, inplace=True)
submission['Label'] = predictions
submission.to_csv('submission.csv', index=False)
|
Digit Recognizer
|
2,404,415 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
|
Digit Recognizer
|
2,404,415 |
pd.set_option("display.precision", 4)
sns.set(style="darkgrid")
warnings.filterwarnings('ignore')
TRAIN_LEN = 891
RNG_SEED = 343
COLS_TO_DROP = []
CHILD_AGE_END = 18
DECKS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'N']
DEFAULT_SURVIVAL = 0.5
def get_deck_class_count(df, T_deck):
deck_count = {'A': {}, 'B': {}, 'C': {}, 'D': {}, 'E': {}, 'F': {},
'G': {}, 'N': {}, 'T': {}}
if not T_deck:
deck_count.pop('T', None)
deck_percent = {}
decks = df.transpose().columns.levels[0]
for deck in decks:
for pclass in range(1, 4):
try:
count = int(df.loc[deck, pclass])
deck_count[deck][pclass] = count
except KeyError:
deck_count[deck][pclass] = 0
        deck_percent[deck] = [(count / sum(deck_count[deck].values())) * 100
                              for count in deck_count[deck].values()]
return deck_count, deck_percent
def get_surv_prop(deck, pclass):
return deck_class_surv_prop[deck][pclass]
def get_corr(df):
    corr = df.corr().abs().unstack().sort_values(kind="quicksort",
                                                 ascending=False).reset_index()
cols_map = {"level_0": "Feature 1",
"level_1": "Feature 2",
0: 'Correlation Coefficient'}
corr.drop(corr.iloc[1::2].index, inplace=True)
corr.rename(columns=cols_map, inplace=True)
return corr.drop(corr[corr['Correlation Coefficient'] == 1.0].index)
def combine_df(df1, df2):
    return pd.concat([df1, df2], sort=True).reset_index(drop=True)
def divide_df(df, first_len):
return df.loc[:first_len - 1], df.loc[first_len:].drop(['Survived'], axis=1)
def drop_cols(cols):
for col in cols:
train.drop([col], inplace=True, axis=1)
test.drop([col], inplace=True, axis=1)
return
def display_class_dist(percentages, y_label, title):
    df_percent = pd.DataFrame(percentages).transpose()
deck_names = percentages.keys()
bar_count = np.arange(len(deck_names))
bar_width = 0.75
plt.figure(figsize=(16, 8))
plt.bar(bar_count, df_percent[0],
color='red', edgecolor='black', width=bar_width,
label='Passenger Class 1')
plt.bar(bar_count, df_percent[1], bottom=df_percent[0],
color='lime', edgecolor='black', width=bar_width,
label='Passenger Class 2')
plt.bar(bar_count, df_percent[2], bottom=df_percent[0] + df_percent[1],
color='blue', edgecolor='black', width=bar_width,
label='Passenger Class 3')
plt.xlabel('Deck', size=25)
plt.ylabel(y_label, size=25)
plt.xticks(bar_count, deck_names)
plt.tick_params(axis='x', labelsize=15)
plt.tick_params(axis='y', labelsize=15)
plt.legend(loc='upper right', prop={'size': 15})
plt.title(title, size=30, y=1)
plt.show()
return
def group_survivors(df, group, new_feature_name):
df[new_feature_name] = DEFAULT_SURVIVAL
for _, group_df in df.groupby(group):
        if len(group_df) > 1:
            surv_max = group_df['Survived'].max()
            surv_min = group_df['Survived'].min()
            if isnan(surv_max) and isnan(surv_min):
                continue
            for _, row in group_df.iterrows():
                passId = row['PassengerId']
                if surv_max == 1.0:
                    df.loc[df['PassengerId'] == passId, new_feature_name] = 1.0
                elif surv_min == 0.0:
                    df.loc[df['PassengerId'] == passId, new_feature_name] = 0.0
    return df<load_from_csv>
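A toy illustration of what group_survivors computes (the frame below is hypothetical, not the competition data): any group with a known survivor is marked 1.0, any group with a known death 0.0, and everyone else keeps DEFAULT_SURVIVAL.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'PassengerId': [1, 2, 3, 4],
    'Surname': ['Smith', 'Smith', 'Jones', 'Jones'],
    'Survived': [1.0, np.nan, 0.0, np.nan],
})
toy = group_survivors(toy, ['Surname'], 'FamilySurvival')
print(toy[['PassengerId', 'FamilySurvival']])  # Smiths -> 1.0, Joneses -> 0.0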
|
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv" )
|
Digit Recognizer
|
2,404,415 |
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
combined = combine_df(train, test)<sort_values>
|
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv" )
|
Digit Recognizer
|
2,404,415 |
train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)<filter>
|
Y_train = train['label']
|
Digit Recognizer
|
2,404,415 |
adults = train[train['Age'] >= CHILD_AGE_END]
children = train[train['Age'] < CHILD_AGE_END]
print('Proportion of passengers <{} who survived: {:.4f}'.format(CHILD_AGE_END, children['Survived'].mean()))
print('Proportion of passengers >={} who survived: {:.4f}'.format(CHILD_AGE_END, adults['Survived'].mean()))<sort_values>
|
X_train = train.drop(labels=['label'], axis=1)
del train
|
Digit Recognizer
|
2,404,415 |
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)<filter>
|
X_train = X_train/255.0
test = test/255.0
|
Digit Recognizer
|
2,404,415 |
combined[combined['Fare'].isnull()]<data_type_conversions>
|
Y_train = to_categorical(Y_train, num_classes=10)
|
Digit Recognizer
|
2,404,415 |
combined['Fare'] = combined['Fare'].fillna(med_fare[3][0][0])<filter>
|
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=42)
|
Digit Recognizer
|
2,404,415 |
combined[combined['Embarked'].isnull()]<data_type_conversions>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5), padding = 'same', activation = 'relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5), padding = 'same', activation = 'relu'))
model.add(MaxPool2D(pool_size =(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 32, kernel_size =(5,5), padding = 'same', activation = 'relu'))
model.add(Conv2D(filters = 32, kernel_size =(5,5), padding = 'same', activation = 'relu'))
model.add(MaxPool2D(pool_size =(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = 'relu'))
model.add(Dropout(0.25))
model.add(Dense(10, activation = 'softmax'))
|
Digit Recognizer
|
2,404,415 |
combined['Embarked'] = combined['Embarked'].fillna('S')<split>
|
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
|
Digit Recognizer
|
2,404,415 |
train, test = divide_df(combined, TRAIN_LEN)
corr_train = get_corr(train)
corr_train[(corr_train['Feature 1'] == 'Age') |
           (corr_train['Feature 2'] == 'Age')]<groupby>
|
epochs = 30
batch_size = 86
|
Digit Recognizer
|
2,404,415 |
age_pclass_sex = train.groupby(['Pclass', 'Sex']).median()['Age']
print('Median ages for the following groups (training data):')
for pclass in range(1, train['Pclass'].nunique() + 1):
for sex in ['female', 'male']:
print('Pclass {} {}s: {}'.format(pclass, sex, age_pclass_sex[pclass][sex]))
print('All passengers: {}'.format(train['Age'].median()))<categorify>
|
datagen = ImageDataGenerator(rotation_range = 10,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1
)
datagen.fit(X_train)
|
Digit Recognizer
|
2,404,415 |
combined['Age'] = combined.groupby(['Pclass', 'Sex'])['Age'].apply(lambda x: x.fillna(x.median()))<feature_engineering>
|
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size = batch_size), epochs = epochs, validation_data =(X_val, Y_val), steps_per_epoch=X_train.shape[0] // batch_size )
|
Digit Recognizer
|
2,404,415 |
combined['Deck'] = combined['Cabin'].apply(lambda s: s[0] if pd.notnull(s) else 'N')
train, test = divide_df(combined, TRAIN_LEN)
deck_class_count = train.groupby(['Deck', 'Pclass']).count().rename(columns={'Name': 'Count'})
deck_class_count = deck_class_count[['Count']]
print('Passenger counts for each Deck, Pclass combination (training data) where N deck denotes null values:',
      deck_class_count, sep='\n')<concatenate>
|
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results, name='Label')
|
Digit Recognizer
|
2,404,415 |
<split><EOS>
|
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)
|
Digit Recognizer
|
1,877,522 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<groupby>
|
%matplotlib inline
|
Digit Recognizer
|
1,877,522 |
deck_class_surv_count = train[['Deck', 'Pclass', 'Survived']]
deck_class_surv_count = deck_class_surv_count.groupby(['Deck', 'Pclass']).sum()
deck_class_surv_count, _ = get_deck_class_count(deck_class_surv_count, False)
deck_class_surv_prop = deck_class_surv_count.copy()
for deck in DECKS:
    for pclass in deck_class_count[deck].keys():
        try:
            deck_class_surv_prop[deck][pclass] = round((deck_class_surv_count[deck][pclass] /
                                                        deck_class_count[deck][pclass]), 2)
        except ZeroDivisionError:
            pass
print("Decimal percent of passengers survived for each 'Deck', 'Pclass' combination (training set):")
display(deck_class_surv_prop)<feature_engineering>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
|
Digit Recognizer
|
1,877,522 |
combined = combine_df(train, test)
combined['DeckPclassSurvProp'] = combined.apply(lambda x: get_surv_prop(x['Deck'], x['Pclass']), axis=1)<split>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
|
Digit Recognizer
|
1,877,522 |
combined['FamilySize'] = combined['SibSp'] + combined['Parch'] + 1
COLS_TO_DROP.extend(['SibSp', 'Parch'])
train, test = divide_df(combined, TRAIN_LEN)<count_values>
|
y_train = train['label']
x_train = train.drop('label', axis=1)
y_train.shape, x_train.shape
|
Digit Recognizer
|
1,877,522 |
combined = combine_df(train, test)
combined['Title'] = combined['Name'].apply(lambda name: name.split(',')[1].split('.')[0].strip())
print('Count of passenger titles aboard the Titanic:')
combined['Title'].value_counts()<define_variables>
|
y_train.value_counts()
|
Digit Recognizer
|
1,877,522 |
normalized_titles = {
"Capt": "Officer",
"Col": "Officer",
"Don": "Royalty",
"Dona": "Royalty",
"Dr": "Officer",
"Jonkheer": "Royalty",
"Lady" : "Royalty",
"Major": "Officer",
"Master" : "Master",
"Miss" : "Miss",
"Mlle": "Miss",
"Mme": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Ms": "Mrs",
"Rev": "Officer",
"Sir" : "Royalty",
"the Countess": "Royalty"}
combined['Title'] = combined['Title'].map(normalized_titles)
print('Count of updated passenger titles aboard the Titanic:')
combined['Title'].value_counts()<sort_values>
|
x_train = x_train/255.0
test = test/255.0
|
Digit Recognizer
|
1,877,522 |
print('Decimal percentages for survival based on passenger title:')
combined[['Title', 'Survived']].groupby(['Title'], as_index=False).mean() \
    .sort_values(by='Survived', kind="quicksort", ascending=False)<feature_engineering>
|
y_train = pd.DataFrame(data=y_train)
one_hot = OneHotEncoder(handle_unknown='ignore')
one_hot.fit(y_train.values)
y_train = one_hot.transform(y_train.values).toarray()
|
Digit Recognizer
|
1,877,522 |
combined['Surname'] = combined['Name'].apply(lambda x: str.split(x, ",")[0])<filter>
|
random_seed = 3
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed)
x_train.shape, x_val.shape, y_train.shape, y_val.shape
|
Digit Recognizer
|
1,877,522 |
display(combined.loc[combined['Surname'] == 'Davies'])<filter>
|
model = Sequential([
    Conv2D(filters=64, input_shape=(28,28,1), kernel_size=(3,3), strides=(1,1), padding='valid'),
    Activation('relu'),
    MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='valid'),
    BatchNormalization(),
    Conv2D(filters=128, kernel_size=(3,3), strides=(1,1), padding='valid'),
    Activation('relu'),
    MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='valid'),
    BatchNormalization(),
    Conv2D(filters=192, kernel_size=(1,1), strides=(1,1), padding='valid'),
    Activation('relu'),
    BatchNormalization(),
    Conv2D(filters=192, kernel_size=(3,3), strides=(1,1), padding='valid'),
    Activation('relu'),
    BatchNormalization(),
    Conv2D(filters=128, kernel_size=(3,3), strides=(1,1), padding='valid'),
    Activation('relu'),
    MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='valid'),
    BatchNormalization(),
    Flatten(),
    Dense(2048),
    Activation('relu'),
    Dropout(0.4),
    BatchNormalization(),
    Dense(2048),
    Activation('relu'),
    Dropout(0.4),
    BatchNormalization(),
    Dense(800),
    Activation('relu'),
    Dropout(0.4),
    BatchNormalization(),
    Dense(10),
    Activation('softmax'),
])
|
Digit Recognizer
|
1,877,522 |
combined.loc[combined['FamilySize'] == 11]<groupby>
|
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
|
Digit Recognizer
|
1,877,522 |
combined = group_survivors(combined, ['Surname', 'Fare', 'FamilySize'], 'FamilySurvival')
print('Count of passengers with family survival data: ',
      combined.loc[combined['FamilySurvival'] != 0.5].shape[0])<groupby>
|
model.fit(x_train, y_train, batch_size=200, validation_data=(x_val, y_val), epochs=10)
|
Digit Recognizer
|
1,877,522 |
combined = group_survivors(combined, ['Ticket'], 'GroupSurvival')
print('Count of passengers with group survival data: ',
      combined[combined['GroupSurvival'] != 0.5].shape[0])<categorify>
|
results = model.predict(test)
|
Digit Recognizer
|
1,877,522 |
<categorify><EOS>
|
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_submission1.csv", index=False)
|
Digit Recognizer
|
1,331,299 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column>
|
%matplotlib inline
|
Digit Recognizer
|
1,331,299 |
passengerid_test = test.PassengerId
COLS_TO_DROP.extend(['PassengerId', 'Name', 'Ticket', 'Surname'])
drop_cols(COLS_TO_DROP)
display(train.head())<split>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
|
Digit Recognizer
|
1,331,299 |
y = train[['Survived']].copy()
train.drop(columns='Survived', inplace=True)
X_train, X_valid, y_train, y_valid = train_test_split(train, y, train_size=0.8, random_state=RNG_SEED)
X_test = test.copy()<choose_model_class>
|
train_images=train.iloc[:,1:].values
train_labels=train.iloc[:,0:1].values
test_X=test.iloc[:,:].values
|
Digit Recognizer
|
1,331,299 |
WANT_HYPERPARAMETERS = False
if WANT_HYPERPARAMETERS:
    params = dict(max_depth=[n for n in range(3, 9)],
                  min_samples_split=[n for n in range(2, 4)],
                  min_samples_leaf=[n for n in range(2, 4)],
                  n_estimators=[20, 40, 60, 80])
    model = GridSearchCV(RandomForestClassifier(random_state=RNG_SEED),
                         params, cv=5, scoring='accuracy')
    model.fit(X_train, y_train)
    print(f'Best parameters: {model.best_params_}')
    print(f'Mean cross-validated accuracy of the best parameters: {model.best_score_:.4f}')
else:
    model = RandomForestClassifier(n_estimators=50, max_depth=5,
                                   min_samples_leaf=2, min_samples_split=2,
                                   random_state=RNG_SEED)
    model.fit(X_train, y_train)<compute_train_metric>
|
train_images = train_images.reshape((-1, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test = test.values.reshape(-1,28,28,1)
test = test / 255.0
|
Digit Recognizer
|
1,331,299 |
predictions_valid = model.predict(X_valid)
score = mean_absolute_error(y_valid, predictions_valid)
print('MAE for validation set prediction:', round(score, 4))
folds = 5
scores = -1 * cross_val_score(model, X_valid, y_valid, cv=folds, scoring='neg_mean_absolute_error')
print('Average MAE score (across experiments using {} folds):'.format(folds))
print(round(scores.mean(), 4))<save_to_csv>
|
train_labels = to_categorical(train_labels)
|
Digit Recognizer
|
1,331,299 |
predictions_test = model.predict(X_test)
predictions_test = predictions_test.astype(int)
output = pd.DataFrame({'PassengerId': passengerid_test,
'Survived': predictions_test})
output.to_csv('submission.csv', index=False)<feature_engineering>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(train_images)
|
Digit Recognizer
|
1,331,299 |
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["KMP_SETTINGS"] = "false"<load_from_csv>
|
model = models.Sequential()
model.add(layers.Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',activation ='relu', input_shape =(28,28,1)))
model.add(layers.Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',activation ='relu'))
model.add(layers.MaxPool2D(pool_size=(2,2)))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(layers.Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(layers.MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(layers.Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(layers.MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dropout(0.25))
model.add(layers.Dense(256, activation='relu',kernel_regularizer=regularizers.l2(0.001)))
model.add(layers.Dropout(0.35))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
|
Digit Recognizer
|
1,331,299 |
train_file_path = ".. /input/titanic/train.csv"
X = pd.read_csv(train_file_path)
features = ["Pclass", "Sex", "Age", "Survived"]
X = pd.get_dummies(X[features])
X = pd.concat([X.drop('Pclass', axis=1), pd.get_dummies(X['Pclass'], prefix="Pclass")], axis=1)
imputer = SimpleImputer()
imputed_X = pd.DataFrame(imputer.fit_transform(X))
imputed_X.columns = X.columns
X = imputed_X
learnoutput = X.Survived
learninput = X.drop("Survived", axis=1)
test_file_path = ".. /input/titanic/test.csv"
test_data = pd.read_csv(test_file_path)
features = ["Pclass", "Sex", "Age"]
test = pd.get_dummies(test_data[features])
testinput = pd.concat([test.drop('Pclass', axis=1), pd.get_dummies(test['Pclass'], prefix="Pclass")], axis=1 )<split>
|
history = model.fit_generator(datagen.flow(train_images,train_labels, batch_size=64),
epochs = 100,
verbose = 2, steps_per_epoch=train_images.shape[0] // 64
)
|
Digit Recognizer
|
1,331,299 |
x_train_split, x_val_split, y_train_split, y_val_split = train_test_split(learninput, learnoutput, random_state=0)<choose_model_class>
|
results = model.predict(test)
|
Digit Recognizer
|
1,331,299 |
model = keras.Sequential([
layers.Dense(
units=128,
activation = "tanh",
input_shape = [6]
),
layers.Dropout(0.2),
layers.Dense(
units = 256,
activation = "tanh",
),
layers.Dropout(0.2),
layers.Dense(
units = 512,
activation = "tanh",
),
layers.Dropout(0.2),
layers.Dense(
units = 1024,
activation = "tanh",
),
layers.Dropout(0.2),
layers.Dense(
units = 1,
activation = "sigmoid"
)
])<train_model>
|
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)
|
Digit Recognizer
|
1,331,299 |
<compute_test_metric><EOS>
|
model.save('digit_clfr.h5')
|
Digit Recognizer
|
1,301,491 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<save_to_csv>
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
|
Digit Recognizer
|
1,301,491 |
predictions = model.predict(testinput)
predictions = predictions.ravel()
predictions = predictions.round()
predictions = np.nan_to_num(predictions)
predictions = predictions.astype(int)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, "Survived": predictions})
output.to_csv("./file1.csv", index=False )<train_model>
|
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv" )
|
Digit Recognizer
|
1,301,491 |
print(f'Training path: {__training_path}\nTest path: {__test_path}')<install_modules>
|
Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1)
X_train = X_train / 255.0
X_test = test / 255.0
X_train = X_train.values.reshape(-1,28,28,1)
X_test = X_test.values.reshape(-1,28,28,1)
Y_train = to_categorical(Y_train, num_classes=10)
|
Digit Recognizer
|
1,301,491 |
!{sys.executable} -m pip install --upgrade scikit-learn=="0.24.2"<import_modules>
|
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.10,
width_shift_range=0.1,
        height_shift_range=0.1)
|
Digit Recognizer
|
1,301,491 |
import sklearn; sklearn.show_versions()<load_from_csv>
|
nets = 15
model = [0] * nets
for j in range(nets):
model[j] = Sequential()
model[j].add(Conv2D(32, kernel_size = 3, activation='relu', input_shape =(28, 28, 1)))
model[j].add(BatchNormalization())
model[j].add(Conv2D(32, kernel_size = 3, activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Dropout(0.4))
model[j].add(Conv2D(64, kernel_size = 3, activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Conv2D(64, kernel_size = 3, activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Dropout(0.4))
model[j].add(Conv2D(128, kernel_size = 4, activation='relu'))
model[j].add(BatchNormalization())
model[j].add(Flatten())
model[j].add(Dropout(0.4))
model[j].add(Dense(10, activation='softmax'))
model[j].compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
1,301,491 |
def __load__data(__training_path, __test_path, concat=False):
__train_dataset = pd.read_csv(__training_path, delimiter=',')
__test_dataset = pd.read_csv(__test_path, delimiter=',')
return __train_dataset, __test_dataset
__train_dataset, __test_dataset = __load__data(__training_path, __test_path, concat=True)
__train_dataset.head()<define_variables>
|
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
history = [0] * nets
epochs = 45
for j in range(nets):
    X_train2, X_val2, Y_train2, Y_val2 = train_test_split(X_train, Y_train, test_size=0.1)
    history[j] = model[j].fit_generator(datagen.flow(X_train2, Y_train2, batch_size=64),
        epochs=epochs, steps_per_epoch=X_train2.shape[0] // 64,
        validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0)
    print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(
        j+1, epochs, max(history[j].history['acc']), max(history[j].history['val_acc'])))
|
Digit Recognizer
|
1,301,491 |
<drop_column><EOS>
|
results = np.zeros((X_test.shape[0], 10))
for j in range(nets):
    results = results + model[j].predict(X_test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("MNIST-CNN-ENSEMBLE.csv", index=False)
|
Digit Recognizer
|
202,132 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<data_type_conversions>
|
%matplotlib inline
|
Digit Recognizer
|
202,132 |
_NUMERIC_COLS_WITH_MISSING_VALUES = ['Age', 'Fare', 'Parch', 'Pclass', 'SibSp']
for _col in _NUMERIC_COLS_WITH_MISSING_VALUES:
    __simple_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
    __train_dataset[_col] = __simple_imputer.fit_transform(__train_dataset[_col].values.reshape(-1, 1))[:, 0]
    if _col in __test_dataset:
        __test_dataset[_col] = __simple_imputer.transform(__test_dataset[_col].astype(__train_dataset[_col].dtypes).values.reshape(-1, 1))[:, 0]<data_type_conversions>
|
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
|
Digit Recognizer
|
202,132 |
_STRING_COLS_WITH_MISSING_VALUES = ['Cabin', 'Ticket', 'Sex', 'Name', 'Embarked']
for _col in _STRING_COLS_WITH_MISSING_VALUES:
    __simple_imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    __train_dataset[_col] = __simple_imputer.fit_transform(__train_dataset[_col].values.reshape(-1, 1))[:, 0]
    if _col in __test_dataset:
        __test_dataset[_col] = __simple_imputer.transform(__test_dataset[_col].astype(__train_dataset[_col].dtypes).values.reshape(-1, 1))[:, 0]<categorify>
|
train_file = ".. /input/train.csv"
test_file = ".. /input/test.csv"
output_file = "submission.csv"
|
Digit Recognizer
|
202,132 |
_CATEGORICAL_COLS = ['Sex', 'Embarked']
_ohe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
__train_dataset[_CATEGORICAL_COLS] = pd.DataFrame(_ohe.fit_transform(__train_dataset[_CATEGORICAL_COLS]), columns=_CATEGORICAL_COLS)
__test_dataset[_CATEGORICAL_COLS] = pd.DataFrame(_ohe.transform(__test_dataset[_CATEGORICAL_COLS]), columns=_CATEGORICAL_COLS)<feature_engineering>
|
raw_data = np.loadtxt(train_file, skiprows=1, dtype='int', delimiter=',')
x_train, x_val, y_train, y_val = train_test_split(
    raw_data[:, 1:], raw_data[:, 0], test_size=0.1)
|
Digit Recognizer
|
202,132 |
_TEXT_COLUMNS = ['Name', 'Ticket', 'Cabin']
def process_text(__dataset):
for _col in _TEXT_COLUMNS:
process_text = [t.lower() for t in __dataset[_col]]
table = str.maketrans('', '', string.punctuation)
        process_text = [t.translate(table) for t in process_text]
        process_text = [re.sub(r'\d+', 'num', t) for t in process_text]
        __dataset[_col] = process_text
    return __dataset
__train_dataset = process_text(__train_dataset)
__test_dataset = process_text(__test_dataset)<prepare_x_and_y>
|
x_train = x_train.astype("float32") / 255.
x_val = x_val.astype("float32") / 255.
|
Digit Recognizer
|
202,132 |
__feature_train = __train_dataset.drop(['Survived'], axis=1)
__target_train = __train_dataset['Survived']
__feature_test = __test_dataset<feature_engineering>
|
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
print(y_train[0])
|
Digit Recognizer
|
202,132 |
_TEXT_COLUMNS = ['Cabin', 'Ticket', 'Name']
__temp_train_data = __feature_train[_TEXT_COLUMNS]
__feature_train.drop(_TEXT_COLUMNS, axis=1, inplace=True)
__feature_train_object_array = []
__temp_test_data = __feature_test[_TEXT_COLUMNS]
__feature_test.drop(_TEXT_COLUMNS, axis=1, inplace=True)
__feature_test_object_array = []
for _col in _TEXT_COLUMNS:
__tfidfvectorizer = TfidfVectorizer(max_features=3000)
vector_train = __tfidfvectorizer.fit_transform(__temp_train_data[_col])
__feature_train_object_array.append(vector_train)
vector_test = __tfidfvectorizer.transform(__temp_test_data[_col])
__feature_test_object_array.append(vector_test)
__feature_train = sparse.hstack([__feature_train] + __feature_train_object_array).tocsr()
__feature_test = sparse.hstack([__feature_test] + __feature_test_object_array).tocsr()<predict_on_test>
|
model = Sequential()
model.add(Conv2D(filters = 16, kernel_size =(3, 3), activation='relu',
input_shape =(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 16, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
202,132 |
__model = RandomForestClassifier()
__model.fit(__feature_train, __target_train)
__y_pred = __model.predict(__feature_test)<prepare_output>
|
datagen = ImageDataGenerator(zoom_range = 0.1,
height_shift_range = 0.1,
width_shift_range = 0.1,
                             rotation_range = 10)
|
Digit Recognizer
|
202,132 |
submission = pd.DataFrame(columns=['PassengerId'], data=__test_dataset_submission_columns)
submission = pd.concat([submission, pd.DataFrame(__y_pred, columns=['Survived'])], axis=1)
submission.head()<save_to_csv>
|
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4), metrics=["accuracy"])
|
Digit Recognizer
|
202,132 |
submission.to_csv("kaggle_submission.csv", index=False )<import_modules>
|
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
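# For reference, this anneal decays geometrically per epoch x (lr = 1e-3 * 0.9**x):
# epoch 0 -> 1.0e-3, epoch 1 -> 9.0e-4, epoch 2 -> 8.1e-4, ..., epoch 10 -> ~3.5e-4.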
|
Digit Recognizer
|
202,132 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt<load_from_csv>
|
hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=16),
steps_per_epoch=500,
epochs=20,
verbose=2,
validation_data=(x_val[:400,:], y_val[:400,:]),
                           callbacks=[annealer])
|
Digit Recognizer
|
202,132 |
train_dataset = pd.read_csv('../input/titanic/train.csv')
train_dataset.head()
<load_from_csv>
|
final_loss, final_acc = model.evaluate(x_val, y_val, verbose=0)
print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
|
Digit Recognizer
|
202,132 |
test_dataset = pd.read_csv('../input/titanic/test.csv')
test_dataset.head()<prepare_x_and_y>
|
y_hat = model.predict(x_val)
y_pred = np.argmax(y_hat, axis=1)
y_true = np.argmax(y_val, axis=1)
cm = confusion_matrix(y_true, y_pred)
print(cm)
|
Digit Recognizer
|
202,132 |
X = train_dataset[['Pclass','Sex','Age','Fare','SibSp','Parch', 'Embarked', 'Name', 'Cabin']]
print(X)
y = train_dataset.iloc[:, 1].values
print(y[0:10])
X_test = test_dataset[['Pclass','Sex','Age','Fare','SibSp','Parch', 'Embarked','Name','Cabin']]
print(X_test)
<feature_engineering>
|
mnist_testset = np.loadtxt(test_file, skiprows=1, dtype='int', delimiter=',')
x_test = mnist_testset.astype("float32")
x_test = x_test.reshape(-1, 28, 28, 1) / 255.
|
Digit Recognizer
|
202,132 |
X['Age'] = X['Age'].fillna(X['Age'].mean())
X["Embarked"] = X["Embarked"].fillna("S")
X.loc[X["Embarked"] == "S", "Embarked"] = 0
X.loc[X["Embarked"] == "C", "Embarked"] = 1
X.loc[X["Embarked"] == "Q", "Embarked"] = 2
X_test['Age'] = X_test['Age'].fillna(X_test['Age'].mean())
X_test["Embarked"] = X_test["Embarked"].fillna("S")
X_test.loc[X_test["Embarked"] == "S", "Embarked"] = 0
X_test.loc[X_test["Embarked"] == "C", "Embarked"] = 1
X_test.loc[X_test["Embarked"] == "Q", "Embarked"] = 2
data = [X, X_test]
titles = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in data:
dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False)
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr','Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
dataset['Title'] = dataset['Title'].map(titles)
dataset['Title'] = dataset['Title'].fillna(0)
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
dataset['Age'] = dataset['Age'].astype(int)
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
dataset['Fare'] = dataset['Fare'].fillna(dataset['Fare'].median())
dataset['Cabin'] = dataset['Cabin'].str[:1]
X = X.drop(['Name'], axis=1)
X_test = X_test.drop(['Name'], axis=1)
print(X)
print(X_test)
<categorify>
|
y_hat = model.predict(x_test, batch_size=64)
|
Digit Recognizer
|
202,132 |
cabin_mapping = {"A": 0, "B": 0.4, "C": 0.8, "D": 1.2, "E": 1.6, "F": 2, "G": 2.4, "T": 2.8}
data = [X, X_test]
for dataset in data:
dataset['Cabin'] = dataset['Cabin'].map(cabin_mapping)
X["Cabin"].fillna(X.groupby("Pclass")["Cabin"].transform("median"), inplace=True)
X_test["Cabin"].fillna(X_test.groupby("Pclass")["Cabin"].transform("median"), inplace=True )<feature_engineering>
|
y_pred = np.argmax(y_hat, axis=1)
|
Digit Recognizer
|
202,132 |
<feature_engineering><EOS>
|
with open(output_file, 'w') as f:
    f.write('ImageId,Label\n')
    for i in range(len(y_pred)):
        f.write("".join([str(i + 1), ',', str(y_pred[i]), '\n']))
|
Digit Recognizer
|
10,050,741 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column>
|
import numpy as np
import pandas as pd
|
Digit Recognizer
|
10,050,741 |
features_drop = ['SibSp', 'Parch', 'FamilySize', 'FareBand']
X = X.drop(features_drop, axis=1)
X_test = X_test.drop(features_drop, axis=1)
print(X)
print(X_test)<normalization>
|
train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
|
Digit Recognizer
|
10,050,741 |
sc = StandardScaler()
X = sc.fit_transform(X)
print('X')
print(X)
X_test = sc.transform(X_test)  # use the scaler fitted on the training data
print('X_test')
print(X_test)<compute_train_metric>
|
import matplotlib.pyplot as plt
|
Digit Recognizer
|
10,050,741 |
classifier = LogisticRegression(random_state = 0)
classifier.fit(X, y)
y_pred_l_reg = classifier.predict(X_test)
acc_l_reg = round(classifier.score(X, y) * 100, 2)
print(str(acc_l_reg) + ' percent')
<compute_train_metric>
|
tr_sample = train.drop('label', axis=1).values.reshape(-1, 28, 28)[0]
ts_sample = test.values.reshape(-1, 28, 28)[0]
|
Digit Recognizer
|
10,050,741 |
clf = SVC()
clf.fit(X, y)
y_pred_svc = clf.predict(X_test)
acc_svc = round(clf.score(X, y) * 100, 2)
print(acc_svc)<predict_on_test>
|
test['pixel345'].value_counts()
|
Digit Recognizer
|
10,050,741 |
clf = KNeighborsClassifier(n_neighbors = 3)
clf.fit(X, y)
y_pred_knn = clf.predict(X_test)
acc_knn = round(clf.score(X, y) * 100, 2)
print(acc_knn)
<choose_model_class>
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
10,050,741 |
clf = DecisionTreeClassifier()
clf.fit(X, y)
y_pred_decision_tree = clf.predict(X_test)
acc_decision_tree = round(clf.score(X, y) * 100, 2)
print(acc_decision_tree)
<predict_on_test>
|
x_test = test.values.reshape(-1,28,28,1)
x_test = x_test/255
x_train_full = train.drop('label', axis=1).values.reshape(-1, 28, 28, 1)
y_train_full = train['label'].values
x_train, x_val, y_train, y_val = train_test_split(x_train_full, y_train_full, test_size=0.2, random_state=42)
|
Digit Recognizer
|
10,050,741 |
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X, y)
y_pred_random_forest = clf.predict(X_test)
acc_random_forest = round(clf.score(X, y) * 100, 2)
print(acc_random_forest)<compute_train_metric>
|
batch_size=64
img_gen = ImageDataGenerator(rescale=1/255,
rotation_range=30,
zoom_range=.1,
shear_range=.1,
width_shift_range=.25,
height_shift_range=.25)
train_gen = img_gen.flow(x_train, y_train,
batch_size=batch_size)
valid_gen = img_gen.flow(x_val, y_val,
batch_size=batch_size,
                        shuffle=False)
|
Digit Recognizer
|
10,050,741 |
sgd = SGDClassifier()
sgd.fit(X, y)
Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X, y) * 100, 2)
print(acc_sgd)<save_to_csv>
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Conv2D, MaxPool2D, Dropout, BatchNormalization, AveragePooling2D, GlobalAveragePooling2D
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
|
Digit Recognizer
|
10,050,741 |
output = pd.DataFrame({'PassengerId': test_dataset.PassengerId, 'Survived': y_pred_l_reg})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved" )<import_modules>
|
early_stop = EarlyStopping(monitor='val_loss', patience=5, mode='min', restore_best_weights=True)
check_point = ModelCheckpoint('digit_reg_mnist_z.h5', monitor='val_accuracy', save_best_only=True)
lr_plateau = ReduceLROnPlateau(monitor='val_accuracy',
                               patience=2,
                               factor=.2,
                               min_lr=1e-6)
|
Digit Recognizer
|
10,050,741 |
import matplotlib.pyplot as plt
import sklearn.model_selection
from sklearn.preprocessing import MinMaxScaler
from sklearn.impute import KNNImputer
from sklearn.metrics import accuracy_score
import seaborn as sns
import numpy as np
import pandas as pd<load_from_csv>
|
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), input_shape=(28,28,1), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(Conv2D(64, kernel_size=(3,3), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(64, kernel_size=(3,3), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(Conv2D(128, kernel_size=(3,3), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(2,2)))
model.add(Conv2D(128, kernel_size=(3,3), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(Conv2D(128, kernel_size=(3,3), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(256, kernel_size=(3,3), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(Conv2D(256, kernel_size=(3,3), padding='same'))
model.add(BatchNormalization(momentum=.9, epsilon=1e-5))
model.add(Activation('relu'))
model.add(GlobalAveragePooling2D())
model.add(Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
model.summary()
|
Digit Recognizer
|
10,050,741 |
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()<load_from_csv>
|
model.fit(train_gen,
epochs=100,
steps_per_epoch=250,
validation_data=valid_gen,
          callbacks=[lr_plateau, early_stop])
|
Digit Recognizer
|
10,050,741 |
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()<concatenate>
|
import seaborn as sns
from sklearn.metrics import classification_report, confusion_matrix
|
Digit Recognizer
|
10,050,741 |
all_data = pd.concat([train_data, test_data])
all_data.head()<compute_test_metric>
|
eval_df = pd.DataFrame(model.history.history)
length = len(eval_df)
|
Digit Recognizer
|
10,050,741 |
survival_rate = train_data["Survived"].mean()
print(f"Survival rate: {survival_rate}")
print(f"Death rate: {1-survival_rate}" )<define_variables>
|
pred = np.argmax(model.predict(valid_gen), axis=1)
pred
|
Digit Recognizer
|
10,050,741 |
women = train_data.loc[train_data.Sex == 'female']["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)<filter>
|
print(classification_report(y_val, pred))
|
Digit Recognizer
|
10,050,741 |
train_data.loc[train_data["Sex"] == "male"]["Embarked"]<normalization>
|
lr_plateau = ReduceLROnPlateau(monitor='accuracy',
patience=2,
factor=.2,
min_lr=1e-6)
full_train_gen = img_gen.flow(x_train_full, y_train_full,
                              batch_size=batch_size)
|
Digit Recognizer
|
10,050,741 |
features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Embarked"]
X_train = pd.get_dummies(train_data[features])
imputer = KNNImputer(n_neighbors=5, weights='uniform', metric='nan_euclidean')
X_train = imputer.fit_transform(X_train)
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
y_train = np.array(train_data["Survived"])
print(f"Shape of the training set: {X_train.shape}" )<train_on_grid>
|
model.fit(full_train_gen,
epochs=22,
steps_per_epoch=250,
          callbacks=[lr_plateau])
|
Digit Recognizer
|
10,050,741 |
def trainClassifier(X_train, y_train, model_name, classifier, params, score, verbose=False, num_folds=10):
kf = sklearn.model_selection.StratifiedKFold(num_folds)
train_scores = []
best_score = 0
for config in sklearn.model_selection.ParameterGrid(params):
train_scores_run = []
counts = []
for train_indices, valid_indices in kf.split(X_train, y_train):
counts.append(len(train_indices))
X_train_kf = X_train[train_indices]
y_train_kf = y_train[train_indices]
X_valid_kf = X_train[valid_indices]
y_valid_kf = y_train[valid_indices]
model = classifier(**config)
model.fit(X_train_kf, y_train_kf)
y_hat = model.predict(X_valid_kf)
train_score = score(y_valid_kf, y_hat)
train_scores_run.append(train_score)
        if np.average(train_scores_run, weights=counts) > best_score:
            best_score = np.average(train_scores_run, weights=counts)
            best_config = config
            if verbose:
                print("New best score obtained")
                print(f"Training with: {config}")
                print(f"Total Score obtained with cross validation: {best_score}\n")
        train_scores.append(np.average(train_scores_run, weights=counts))
    output_df = pd.DataFrame(data=[[model_name, best_config, best_score]],
                             columns=["model_name", "parameters", "training_score"])
    return output_df<create_dataframe>
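A hedged example of invoking trainClassifier; the grid and metric below are illustrative, and X_train/y_train are assumed to be the arrays prepared earlier:
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

grid = {'n_estimators': [50, 100], 'max_depth': [4, 6]}  # hypothetical parameter grid
results_df = trainClassifier(X_train, y_train, 'random_forest',
                             RandomForestClassifier, grid,
                             accuracy_score, verbose=True, num_folds=5)
print(results_df)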
|
real_pred = np.argmax(model.predict(x_test), axis=1)
|
Digit Recognizer
|
10,050,741 |
<choose_model_class><EOS>
|
submit_df = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
submit_df['Label'] = real_pred
submit_df.to_csv('submission.csv', index=False)
|
Digit Recognizer
|
9,988,485 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
|
np.random.seed(0)
%matplotlib inline
|
Digit Recognizer
|