kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57)
---|---|---|---|
6,485,130 |
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()<define_variables>
|
learn = cnn_learner(data, base_arch = models.resnet34, metrics = accuracy,model_dir="/tmp/models", callback_fns=ShowGraph)
|
Digit Recognizer
|
6,485,130 |
women = train_data.loc[train_data.Sex == 'female']["Survived"]
rate_women = sum(women)/len(women)
print("% of women who survived:", rate_women )<define_variables>
|
doc(fit_one_cycle )
|
Digit Recognizer
|
6,485,130 |
men = train_data.loc[train_data.Sex == 'male']["Survived"]
rate_men = sum(men)/len(men)
print("% of men who survived:", rate_men )<count_missing_values>
|
learn.fit_one_cycle(3 )
|
Digit Recognizer
|
6,485,130 |
for column in train_data.columns:
print("%-15s%3d" %(column, pd.isnull(train_data[column] ).sum()))<count_missing_values>
|
learn.save('model1' )
|
Digit Recognizer
|
6,485,130 |
for column in test_data.columns:
print("%-15s%3d" %(column, pd.isnull(test_data[column] ).sum()))<count_duplicates>
|
learn.load('model1' )
|
Digit Recognizer
|
6,485,130 |
print(train_data.drop_duplicates().groupby('Embarked')['PassengerId'].count() )<feature_engineering>
|
learn.lr_find()
|
Digit Recognizer
|
6,485,130 |
train_data['Embarked'] = train_data['Embarked'].fillna("S" )<feature_engineering>
|
learn.fit_one_cycle(30 , slice(1e-3, 1e-2))
|
Digit Recognizer
|
6,485,130 |
test_data['Fare'] = test_data['Fare'].fillna(0 )<feature_engineering>
|
interp = ClassificationInterpretation.from_learner(learn )
|
Digit Recognizer
|
6,485,130 |
train_data['TicketLength'] = train_data['Ticket'].apply(lambda x: len(x))<feature_engineering>
|
class_score , y = learn.get_preds(DatasetType.Test )
|
Digit Recognizer
|
6,485,130 |
test_data['TicketLength'] = test_data['Ticket'].apply(lambda x: len(x))<save_to_csv>
|
probabilities = class_score[0].tolist()
[f"{index}: {probabilities[index]}" for index in range(len(probabilities)) ]
|
Digit Recognizer
|
6,485,130 |
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Embarked", "TicketLength"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!" )<import_modules>
|
class_score = np.argmax(class_score, axis=1 )
|
Digit Recognizer
|
6,485,130 |
import random
import html
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import tensorflow as tf
import tensorflow.keras.backend as K
import os
from scipy.stats import spearmanr
from scipy.optimize import minimize
from transformers import *
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling1D
from tensorflow.keras.models import Model
from sklearn.model_selection import KFold
from scipy.stats import spearmanr<set_options>
|
ImageId = [os.path.splitext(path)[0] for path in os.listdir(TEST)]
ImageId = [int(path)for path in ImageId]
ImageId = [ID+1 for ID in ImageId]
|
Digit Recognizer
|
6,485,130 |
<set_options><EOS>
|
submission = pd.DataFrame({
"ImageId": ImageId,
"Label": class_score
})
submission.to_csv("submission.csv", index=False)
display(submission.head(3))
display(submission.tail(3))
|
Digit Recognizer
|
9,904,099 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
9,904,099 |
seed = 13
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed )<load_from_csv>
|
df_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
df_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
9,904,099 |
def get_data() :
print('getting test and train data...')
path = '../input/google-quest-challenge/'
train = pd.read_csv(path+'train.csv')
test = pd.read_csv(path+'test.csv')
submission = pd.read_csv(path+'sample_submission.csv')
y = train[train.columns[11:]]
X = train[['question_title', 'question_body', 'answer']]
X_test = test[['question_title', 'question_body', 'answer']]
X.question_body = X.question_body.apply(html.unescape)
X.question_title = X.question_title.apply(html.unescape)
X.answer = X.answer.apply(html.unescape)
X_test.question_body = X_test.question_body.apply(html.unescape)
X_test.question_title = X_test.question_title.apply(html.unescape)
X_test.answer = X_test.answer.apply(html.unescape)
return X, X_test, y, train, test<load_pretrained>
|
X_train = df_train.drop('label', axis=1 ).values
y_train = df_train['label'].values.reshape(-1,1)
X_test = df_test.values
|
Digit Recognizer
|
9,904,099 |
def get_tokenizer(model_name):
print(f'getting tokenizer for {model_name}...')
if model_name == 'xlnet-base-cased':
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
elif model_name == 'roberta-base':
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
elif model_name == 'bert-base-uncased':
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
return tokenizer<categorify>
|
X_train = X_train.astype('float32')/255.0
X_test = X_test.astype('float32')/255.0
|
Digit Recognizer
|
9,904,099 |
def fix_length(tokens, max_sequence_length=512, q_max_len=254, a_max_len=254, model_type='questions'):
if model_type == 'questions':
length = len(tokens)
if length > max_sequence_length:
tokens = tokens[:max_sequence_length-1]
return tokens
else:
question_tokens, answer_tokens = tokens
q_len = len(question_tokens)
a_len = len(answer_tokens)
if q_len + a_len + 3 > max_sequence_length:
if a_max_len <= a_len and q_max_len <= q_len:
q_new_len_head = q_max_len//2
question_tokens = question_tokens[:q_new_len_head] + question_tokens[-q_new_len_head:]
a_new_len_head = a_max_len//2
answer_tokens = answer_tokens[:a_new_len_head] + answer_tokens[-a_new_len_head:]
elif q_len <= a_len and q_len < q_max_len:
a_max_len = a_max_len +(q_max_len - q_len - 1)
a_new_len_head = a_max_len//2
answer_tokens = answer_tokens[:a_new_len_head] + answer_tokens[-a_new_len_head:]
elif a_len < q_len:
q_max_len = q_max_len +(a_max_len - a_len - 1)
q_new_len_head = q_max_len//2
question_tokens = question_tokens[:q_new_len_head] + question_tokens[-q_new_len_head:]
return question_tokens, answer_tokens<categorify>
|
X_train = X_train.reshape(-1,28,28,1)
X_test = X_test.reshape(-1,28,28,1)
y_train = to_categorical(y_train)
target_count = y_train.shape[1]
|
Digit Recognizer
|
9,904,099 |
def transformer_inputs(title, question, answer, tokenizer, model_type='questions', MAX_SEQUENCE_LENGTH = 512):
if model_type == 'questions':
question = f"{title} [SEP] {question}"
question_tokens = tokenizer.tokenize(question)
question_tokens = fix_length(question_tokens, model_type=model_type)
ids_q = tokenizer.convert_tokens_to_ids(["[CLS]"] + question_tokens)
padded_ids =(ids_q + [tokenizer.pad_token_id] *(MAX_SEQUENCE_LENGTH - len(ids_q)))[:MAX_SEQUENCE_LENGTH]
token_type_ids =([0] * MAX_SEQUENCE_LENGTH)[:MAX_SEQUENCE_LENGTH]
attention_mask =([1] * len(ids_q)+ [0] *(MAX_SEQUENCE_LENGTH - len(ids_q)))[:MAX_SEQUENCE_LENGTH]
return padded_ids, token_type_ids, attention_mask
else:
question = f"{title} [SEP] {question}"
question_tokens = tokenizer.tokenize(question)
answer_tokens = tokenizer.tokenize(answer)
question_tokens, answer_tokens = fix_length(tokens=(question_tokens, answer_tokens), model_type=model_type)
ids = tokenizer.convert_tokens_to_ids(["[CLS]"] + question_tokens + ["[SEP]"] + answer_tokens + ["[SEP]"])
padded_ids = ids + [tokenizer.pad_token_id] *(MAX_SEQUENCE_LENGTH - len(ids))
token_type_ids = [0] *(1 + len(question_tokens)+ 1)+ [1] *(len(answer_tokens)+ 1)+ [0] *(MAX_SEQUENCE_LENGTH - len(ids))
attention_mask = [1] * len(ids)+ [0] *(MAX_SEQUENCE_LENGTH - len(ids))
return padded_ids, token_type_ids, attention_mask<categorify>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
9,904,099 |
def input_data(df, tokenizer, model_type='questions'):
print(f'generating {model_type} input for transformer...')
input_ids, input_token_type_ids, input_attention_masks = [], [], []
for title, body, answer in tqdm(zip(df["question_title"].values, df["question_body"].values, df["answer"].values)) :
ids, type_ids, mask = transformer_inputs(title, body, answer, tokenizer, model_type=model_type)
input_ids.append(ids)
input_token_type_ids.append(type_ids)
input_attention_masks.append(mask)
return(
np.asarray(input_ids, dtype=np.int32),
np.asarray(input_attention_masks, dtype=np.int32),
np.asarray(input_token_type_ids, dtype=np.int32))<load_pretrained>
|
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42 )
|
Digit Recognizer
|
9,904,099 |
def get_model(name):
if name == 'xlnet-base-cased':
config = XLNetConfig.from_pretrained('xlnet-base-cased', output_hidden_states=True)
model = TFXLNetModel.from_pretrained('xlnet-base-cased', config=config)
elif name == 'roberta-base':
config = RobertaConfig.from_pretrained('roberta-base', output_hidden_states=True)
model = TFRobertaModel.from_pretrained('roberta-base', config=config)
elif name == 'bert-base-uncased':
config = BertConfig.from_pretrained('bert-base-uncased', output_hidden_states=True)
model = TFBertModel.from_pretrained('bert-base-uncased', config=config)
return model<choose_model_class>
|
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(target_count, activation='softmax'))
optimizer = RMSprop(learning_rate=0.001,rho=0.99)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, verbose=1,patience=2, min_lr=0.00000001)
callback = EarlyStopping(monitor='loss', patience=5)
history = model.fit(datagen.flow(X_train,y_train, batch_size=64), epochs = 50, validation_data=(X_val, y_val), verbose = 1, callbacks=[reduce_lr, callback] )
|
Digit Recognizer
|
9,904,099 |
def create_model(name='xlnet-base-cased', model_type='questions'):
print(f'creating model {name}...')
K.clear_session()
max_seq_length = 512
input_tokens = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="input_tokens")
input_mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="input_mask")
input_segment = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="input_segment")
model = get_model(name)
if(name == 'xlnet-base-cased'):
sequence_output, hidden_states = model([input_tokens, input_mask])
elif(name=='roberta-base' and model_type!='questions'):
sequence_output, pooler_output, hidden_states = model([input_tokens, input_mask])
else:
sequence_output, pooler_output, hidden_states = model([input_tokens, input_mask, input_segment])
h12 = tf.reshape(hidden_states[-1][:,0],(-1,1,768))
h11 = tf.reshape(hidden_states[-2][:,0],(-1,1,768))
h10 = tf.reshape(hidden_states[-3][:,0],(-1,1,768))
h09 = tf.reshape(hidden_states[-4][:,0],(-1,1,768))
concat_hidden = tf.keras.layers.Concatenate(axis=2 )([h12, h11, h10, h09])
x = GlobalAveragePooling1D()(concat_hidden)
x = Dropout(0.2 )(x)
if model_type == 'answers':
output = Dense(9, activation='sigmoid' )(x)
elif model_type == 'questions':
output = Dense(21, activation='sigmoid' )(x)
else:
output = Dense(30, activation='sigmoid' )(x)
if(name == 'xlnet-base-cased')or(name=='roberta-base' and model_type!='questions'):
model = Model(inputs=[input_tokens, input_mask], outputs=output)
else:
model = Model(inputs=[input_tokens, input_mask, input_segment], outputs=output)
return model<categorify>
|
y_val_m = y_val.argmax(axis=1)
y_val_hat_prob = model.predict(X_val)
y_val_hat = y_val_hat_prob.argmax(axis=1)
X_val_inc = X_val[y_val_m != y_val_hat, :, :, :]
y_val_inc = y_val_m[y_val_m != y_val_hat]
y_val_hat_inc = y_val_hat[y_val_m != y_val_hat]
y_val_hat_prob_inc = y_val_hat_prob[y_val_m != y_val_hat]
|
Digit Recognizer
|
9,904,099 |
class data_generator:
def __init__(self, X, X_test, tokenizer, type_):
tokens, masks, segments = input_data(X_test, tokenizer, type_)
self.test_data = {'input_tokens': tokens,
'input_mask': masks,
'input_segment': segments}
self.tokens, self.masks, self.segments = input_data(X, tokenizer, type_)
def generate_data(self, tr, cv, name='xlnet-base-cased', model_type='questions'):
if name!='xlnet-base-cased':
train_data = {'input_tokens': self.tokens[tr],
'input_mask': self.masks[tr],
'input_segment': self.segments[tr]}
cv_data = {'input_tokens': self.tokens[cv],
'input_mask': self.masks[cv],
'input_segment': self.segments[cv]}
else:
train_data = {'input_tokens': self.tokens[tr],
'input_mask': self.masks[tr]}
cv_data = {'input_tokens': self.tokens[cv],
'input_mask': self.masks[cv]}
if model_type=='questions':
y_tr = y.values[tr, 21:]
y_cv = y.values[cv, 21:]
elif model_type=='answers':
y_tr = y.values[tr, 21:]
y_cv = y.values[cv, 21:]
else:
y_tr = y.values[tr]
y_cv = y.values[cv]
return train_data, cv_data, y_tr, y_cv<categorify>
|
for i in range(0,10):
act = y_val_inc[i]
pred = y_val_hat_inc[i]
print('Actual: {}; Confidence(act/pred): \t{} - {:.0f}% \t{} - {:.0f}%'.format(act, act, y_val_hat_prob_inc[i][act]*100, pred, y_val_hat_prob_inc[i][pred]*100))
|
Digit Recognizer
|
9,904,099 |
def optimize_ranks(preds, unique_labels):
print(f'optimizing the predicted values...')
new_preds = np.zeros(preds.shape)
for i in range(preds.shape[1]):
interpolate_bins = np.digitize(preds[:, i], bins=unique_labels, right=False)
if len(np.unique(interpolate_bins)) == 1:
new_preds[:, i] = preds[:, i]
else:
new_preds[:, i] = unique_labels[interpolate_bins]
return new_preds<count_unique_values>
|
y_test_hat = model.predict(X_test ).argmax(axis=1)
df_submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
df_submission['Label'] = y_test_hat.astype('int32')
df_submission.to_csv('Submission.csv', index=False)
print('Submission saved!' )
|
Digit Recognizer
|
9,904,099 |
def get_exp_labels(train):
X = train.iloc[:, 11:]
unique_labels = np.unique(X.values)
denominator = 60
q = np.arange(0, 101, 100 / denominator)
exp_labels = np.percentile(unique_labels, q)
return exp_labels<compute_test_metric>
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
9,904,099 |
def compute_spearmanr_ignore_nan(trues, preds):
rhos = []
for tcol, pcol in zip(np.transpose(trues), np.transpose(preds)) :
rhos.append(spearmanr(tcol, pcol ).correlation)
return np.nanmean(rhos )<compute_test_metric>
|
df_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
df_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
9,904,099 |
def rhos(y, y_pred):
return tf.py_function(compute_spearmanr_ignore_nan,(y, y_pred), tf.double )<train_model>
|
X_train = df_train.drop('label', axis=1 ).values
y_train = df_train['label'].values.reshape(-1,1)
X_test = df_test.values
|
Digit Recognizer
|
9,904,099 |
def fit_model(model, model_name, model_type, data_gen, file_path, train, use_saved_weights=True):
path = '../input/google-qna-predicted-data/'
if use_saved_weights:
print(f'getting saved weights for {model_name}...')
model.load_weights(path+file_path)
else:
print(f'fitting data on {model_name}...')
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00002)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=[rhos])
kf = KFold(n_splits=5, random_state=42)
for tr, cv in kf.split(np.arange(train.shape[0])) :
tr_data, cv_data, y_tr, y_cv = data_gen.generate_data(tr, cv, model_name, model_type)
model.fit(tr_data, y_tr, epochs=1, batch_size=4, validation_data=(cv_data, y_cv))
model.save_weights(file_path)
return model<concatenate>
|
X_train = X_train.astype('float32')/255.0
X_test = X_test.astype('float32')/255.0
|
Digit Recognizer
|
9,904,099 |
def get_weighted_avg(model_predictions):
xlnet_q, xlnet_a, roberta_q, roberta_a, roberta_qa, bert_q, bert_a, bert_qa = model_predictions
xlnet_concat = np.concatenate(( xlnet_q, xlnet_a), axis=1)
bert_concat = np.concatenate(( bert_q, bert_a), axis=1)
roberta_concat = np.concatenate(( roberta_q, roberta_a), axis=1)
predict =(roberta_qa + bert_qa + xlnet_concat + bert_concat + roberta_concat)/5
return predict<define_variables>
|
X_train = X_train.reshape(-1,28,28,1)
X_test = X_test.reshape(-1,28,28,1)
y_train = to_categorical(y_train)
target_count = y_train.shape[1]
|
Digit Recognizer
|
9,904,099 |
def get_predictions(predictions_present=True, model_saved_weights_present=True):
msw = model_saved_weights_present
X, X_test, y, train, test = get_data()
path = '../input/google-qna-predicted-data/'
model_names = ['xlnet-base-cased', 'roberta-base', 'bert-base-uncased']
model_types = ['questions', 'answers', 'questions_answers']
saved_weights_names = ['xlnet_q.h5', 'xlnet_a.h5', 'roberta_q.h5', 'roberta_a.h5',
'roberta_qa.h5', 'bert_q.h5', 'bert_a.h5', 'bert_qa.h5']
saved_model_predictions = [path+'xlnet_q.csv', path+'xlnet_a.csv', path+'roberta_q.csv', path+'roberta_a.csv',
path+'roberta_qa.csv', path+'bert_q.csv', path+'bert_a.csv', path+'bert_qa.csv']
model_predictions = []
if predictions_present:
model_predictions = [pd.read_csv(file_name ).values for file_name in saved_model_predictions]
else:
i = 0
for name_ in model_names:
for type_ in model_types:
if name_ == 'xlnet-base-cased' and type_ == 'questions_answers':
continue
print('-'*100)
model = create_model(name_, type_)
tokenizer = get_tokenizer(name_)
data_gen = data_generator(X, X_test, tokenizer, type_)
model = fit_model(model, name_, type_, data_gen, saved_weights_names[i], train, msw)
print(f'getting target predictions from {name_}...')
model_predictions.append(model.predict(data_gen.test_data))
i+=1
predicted_labels = get_weighted_avg(model_predictions)
exp_labels = get_exp_labels(train)
optimized_predicted_labels = optimize_ranks(predicted_labels, exp_labels)
df = pd.concat([test['qa_id'], pd.DataFrame(optimized_predicted_labels, columns=train.columns[11:])], axis=1)
print('done...!')
return df<save_to_csv>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
9,904,099 |
submission = get_predictions(predictions_present=True)
<save_to_csv>
|
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42 )
|
Digit Recognizer
|
9,904,099 |
submission = pd.read_csv('../input/google-qna-predicted-data/output.csv')
sample_submission = pd.read_csv('../input/google-quest-challenge/sample_submission.csv')
id_in_sub = set(submission.qa_id)
id_in_sample_submission = set(sample_submission.qa_id)
diff = id_in_sample_submission - id_in_sub
sample_submission = pd.concat([submission, sample_submission[sample_submission.qa_id.isin(diff)]] ).reset_index(drop=True)
sample_submission.to_csv("submission.csv", index=False )<set_options>
|
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding='valid', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(target_count, activation='softmax'))
optimizer = RMSprop(learning_rate=0.001,rho=0.99)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, verbose=1,patience=2, min_lr=0.00000001)
callback = EarlyStopping(monitor='loss', patience=5)
history = model.fit(datagen.flow(X_train,y_train, batch_size=64), epochs = 50, validation_data=(X_val, y_val), verbose = 1, callbacks=[reduce_lr, callback] )
|
Digit Recognizer
|
9,904,099 |
%matplotlib inline
<load_from_csv>
|
y_val_m = y_val.argmax(axis=1)
y_val_hat_prob = model.predict(X_val)
y_val_hat = y_val_hat_prob.argmax(axis=1)
X_val_inc = X_val[y_val_m != y_val_hat, :, :, :]
y_val_inc = y_val_m[y_val_m != y_val_hat]
y_val_hat_inc = y_val_hat[y_val_m != y_val_hat]
y_val_hat_prob_inc = y_val_hat_prob[y_val_m != y_val_hat]
|
Digit Recognizer
|
9,904,099 |
train_df = pd.read_csv(".. /input/GiveMeSomeCredit/cs-training.csv")
test_df = pd.read_csv(".. /input/GiveMeSomeCredit/cs-test.csv")
combine = [train_df, test_df]<create_dataframe>
|
for i in range(0,10):
act = y_val_inc[i]
pred = y_val_hat_inc[i]
print('Actual: {}; Confidence(act/pred): \t{} - {:.0f}% \t{} - {:.0f}%'.format(act, act, y_val_hat_prob_inc[i][act]*100, pred, y_val_hat_prob_inc[i][pred]*100))
|
Digit Recognizer
|
9,904,099 |
<count_missing_values><EOS>
|
y_test_hat = model.predict(X_test ).argmax(axis=1)
df_submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
df_submission['Label'] = y_test_hat.astype('int32')
df_submission.to_csv('Submission.csv', index=False)
print('Submission saved!' )
|
Digit Recognizer
|
9,782,103 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_variables>
|
%matplotlib inline
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
Digit Recognizer
|
9,782,103 |
def detect_outliers(df,n,features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col], 25)
Q3 = np.percentile(df[col],75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outlier_list_col = df[(df[col] < Q1 - outlier_step)|(df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outlier_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list(k for k, v in outlier_indices.items() if v > n)
return multiple_outliers<define_variables>
|
ds_train = pd.read_csv('../input/digit-recognizer/train.csv')
ds_test = pd.read_csv('../input/digit-recognizer/test.csv')
|
Digit Recognizer
|
9,782,103 |
Outliers_to_drop = detect_outliers(train_df,3,train_df.columns.values[2:])
print("异常值:",round(len(Outliers_to_drop)/train_df.shape[0]*100,2),"%")
train_df.loc[Outliers_to_drop]<count_duplicates>
|
val_size = 0.1
X_all = ds_train.loc[:, ds_train.columns != 'label'].to_numpy()
y_all = ds_train['label'].to_numpy()
test = ds_test.to_numpy()
|
Digit Recognizer
|
9,782,103 |
train_df.duplicated().value_counts()<filter>
|
X_all = X_all / 255.0
X_all = X_all.reshape(-1,28,28,1)
test = test / 255.0
test = test.reshape(-1,28,28,1)
y_all = to_categorical(y_all, num_classes = 10)
X_train, X_val, y_train, y_val = model_selection.train_test_split(X_all, y_all, test_size=val_size, random_state=1 )
|
Digit Recognizer
|
9,782,103 |
len(train_df.loc[train_df['age'] == 0] )<define_variables>
|
optimizer = RMSprop(lr=0.001, rho=0.9, decay=0.0 )
|
Digit Recognizer
|
9,782,103 |
late_pay_cols = ['NumberOfTime30-59DaysPastDueNotWorse','NumberOfTimes90DaysLate','NumberOfTime60-89DaysPastDueNotWorse']<count_values>
|
def get_simple_model() :
model = Sequential()
model.add(Flatten(input_shape=(28,28,1)))
model.add(Dense(64, activation = 'sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(24, activation = 'sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(10, activation = 'softmax'))
model.compile(optimizer = optimizer, loss = "categorical_crossentropy", metrics=["accuracy"])
return model
|
Digit Recognizer
|
9,782,103 |
train_df["NumberOfTime30-59DaysPastDueNotWorse"].value_counts().sort_index()<count_values>
|
def get_cnn_model() :
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
model.compile(optimizer = optimizer, loss = "categorical_crossentropy", metrics=["accuracy"])
return model
|
Digit Recognizer
|
9,782,103 |
train_df["NumberOfTimes90DaysLate"].value_counts().sort_index()<count_values>
|
def test_submission(model):
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False )
|
Digit Recognizer
|
9,782,103 |
train_df["NumberOfTime60-89DaysPastDueNotWorse"].value_counts().sort_index()<feature_engineering>
|
cnn_model_2, _ = train_model(get_cnn_model() , epochs = 30)
test_submission(cnn_model_2 )
|
Digit Recognizer
|
9,041,790 |
distinct_triples_counts = dict()
for arr in train_df.loc[train_df["NumberOfTimes90DaysLate"] > 17][late_pay_cols].values:
triple = ",".join(list(map(str, arr)))
if triple not in distinct_triples_counts:
distinct_triples_counts[triple] = 0
else:
distinct_triples_counts[triple] += 1
distinct_triples_counts<count_values>
|
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils
|
Digit Recognizer
|
9,041,790 |
train_df.loc[train_df["DebtRatio"] > train_df["DebtRatio"].quantile(0.975)]['MonthlyIncome'].value_counts()<filter>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
|
Digit Recognizer
|
9,041,790 |
len(train_df[(train_df["DebtRatio"] > train_df["DebtRatio"].quantile(0.9)) &(train_df['SeriousDlqin2yrs'] == train_df['MonthlyIncome'])] )<count_values>
|
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(( X_train.shape[0], num_pixels)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], num_pixels)).astype('float32' )
|
Digit Recognizer
|
9,041,790 |
train_df["NumberRealEstateLoansOrLines"].value_counts()<count_values>
|
X_train = X_train / 255
X_test = X_test / 255
|
Digit Recognizer
|
9,041,790 |
train_df["NumberOfDependents"].value_counts()<rename_columns>
|
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
|
Digit Recognizer
|
9,041,790 |
for df in combine:
df.rename(columns={'Unnamed: 0':'ID'}, inplace=True )<data_type_conversions>
|
def baseline_model() :
model = Sequential()
model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
|
Digit Recognizer
|
9,041,790 |
for df in combine:
df['NumberOfDependents'].fillna(0, inplace=True )<prepare_x_and_y>
|
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
def randomforest_filled_func(df):
train = df[df.MonthlyIncome.notnull() ]
test = df[df.MonthlyIncome.isnull() ]
train_x = train.iloc[:,2:].drop('MonthlyIncome',axis=1)
train_y = train['MonthlyIncome']
test_x = test.iloc[:,2:].drop('MonthlyIncome',axis=1)
rfr = RandomForestRegressor(random_state=2021, n_estimators=200,max_depth=3,n_jobs=-1)
rfr.fit(train_x, train_y)
predicted = rfr.predict(test_x ).round(0)
print(predicted)
df.loc[(df.MonthlyIncome.isnull()), 'MonthlyIncome'] = predicted
return df<statistical_test>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
X_train = X_train.reshape(( X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def baseline_model() :
model = Sequential()
model.add(Conv2D(32,(5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model_simple = baseline_model()
model_simple.summary()
|
Digit Recognizer
|
9,041,790 |
for df in combine:
df = randomforest_filled_func(df )<filter>
|
model_simple.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
scores = model_simple.evaluate(X_test, y_test, verbose=1)
print("CNN Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
train_df = train_df[train_df['age'] > 0]<filter>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
X_train = X_train.reshape(( X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def large_model() :
model = Sequential()
model.add(Conv2D(30,(5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(15,(3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model_large = large_model()
model_large.summary()
|
Digit Recognizer
|
9,041,790 |
train_df = train_df[train_df['RevolvingUtilizationOfUnsecuredLines'] < 13]<filter>
|
model_large.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
scores = model_large.evaluate(X_test, y_test, verbose=1)
print("Large CNN Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
train_df = train_df[train_df['NumberOfTimes90DaysLate'] <= 17]<filter>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
X_train = X_train.reshape(( X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def larger_model() :
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', input_shape=(28,28,1)))
model.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
model.add(Conv2D(filters=192, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.1))
model.add(Conv2D(filters=192, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2, padding='same'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model_larger = larger_model()
model_larger.summary()
|
Digit Recognizer
|
9,041,790 |
train_df = train_df.loc[train_df["DebtRatio"] <= train_df["DebtRatio"].quantile(0.975)]<count_duplicates>
|
model_larger.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=100)
scores = model_larger.evaluate(X_test, y_test, verbose=1)
print("Larger CNN Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
train_df.duplicated().value_counts()<remove_duplicates>
|
X_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ).values.astype('float32')
X_test = X_test.reshape(-1, 28, 28, 1)
X_test = X_test.astype('float32')/255
testY = model_larger.predict_classes(X_test, verbose=1 )
|
Digit Recognizer
|
9,041,790 |
train_df = train_df.drop_duplicates()<split>
|
sub = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
sub['Label'] = testY
sub.to_csv('submission.csv',index=False )
|
Digit Recognizer
|
9,041,790 |
x_train = train_df.iloc[:,2:]
y_train = train_df['SeriousDlqin2yrs'].astype('uint8')
train_X, test_X, train_y, test_y = train_test_split(x_train,y_train,test_size=.1,random_state=2021, stratify = y_train )<find_best_model_class>
|
YouTubeVideo('3JQ3hYko51Y', width=800, height=450 )
|
Digit Recognizer
|
9,041,790 |
num_estimators = range(100,401,10)
train_scores = []
test_scores = []
for num_estimator in num_estimators:
lgbm = LGBMClassifier(is_unbalance = True,num_leaves=60,learning_rate=0.02,n_estimators=num_estimator)
lgbm.fit(train_X,train_y)
pre_y1 = lgbm.predict_proba(train_X)[:,1]
pre_y2 = lgbm.predict_proba(test_X)[:,1]
train_scores.append(roc_auc_score(train_y, pre_y1))
test_scores.append(roc_auc_score(test_y, pre_y2))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(num_estimators,test_scores,label='Testing Score')
ax.set_xlabel(r'num')
ax.set_ylabel(r'auc')
ax.set_title('num_estimators')
ax.legend(loc='best')
plt.show()<find_best_params>
|
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils
|
Digit Recognizer
|
9,041,790 |
num_estimators[test_scores.index(max(test_scores)) ]<filter>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
|
Digit Recognizer
|
9,041,790 |
learning_rates [test_scores.index(max(test_scores)) ]<filter>
|
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(( X_train.shape[0], num_pixels)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], num_pixels)).astype('float32' )
|
Digit Recognizer
|
9,041,790 |
max_bins[test_scores.index(max(test_scores)) ]<filter>
|
X_train = X_train / 255
X_test = X_test / 255
|
Digit Recognizer
|
9,041,790 |
num_leaves[test_scores.index(max(test_scores)) ]<filter>
|
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
|
Digit Recognizer
|
9,041,790 |
max_depths[test_scores.index(max(test_scores)) ]<filter>
|
def baseline_model() :
model = Sequential()
model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
|
Digit Recognizer
|
9,041,790 |
feature_fractions[test_scores.index(max(test_scores)) ]<train_model>
|
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)
scores = model.evaluate(X_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
lgbm = LGBMClassifier(is_unbalance = True,max_depth=14,num_leaves=30,max_bin=40,learning_rate=0.025,n_estimators=180,feature_fraction=0.6)
lgbm.fit(train_X,train_y)
pre_y = lgbm.predict_proba(test_X)[:,1]<predict_on_test>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
X_train = X_train.reshape(( X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def baseline_model() :
model = Sequential()
model.add(Conv2D(32,(5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model_simple = baseline_model()
model_simple.summary()
|
Digit Recognizer
|
9,041,790 |
test_df_x = test_df.iloc[:,2:]
pre_y2 = lgbm.predict_proba(test_df_x)[:,1]<load_from_csv>
|
model_simple.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
scores = model_simple.evaluate(X_test, y_test, verbose=1)
print("CNN Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
result = pd.read_csv('/kaggle/input/GiveMeSomeCredit/sampleEntry.csv')
result['Probability'] = pre_y2
result.to_csv('./submit.csv',index=False)
reload = pd.read_csv('./submit.csv')
reload<load_from_csv>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
X_train = X_train.reshape(( X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def large_model() :
model = Sequential()
model.add(Conv2D(30,(5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(15,(3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model_large = large_model()
model_large.summary()
|
Digit Recognizer
|
9,041,790 |
warnings.filterwarnings("ignore")
x_train = pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-training.csv')
x_test = pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-test.csv')
combine = [x_train, x_test]<count_missing_values>
|
model_large.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
scores = model_large.evaluate(X_test, y_test, verbose=1)
print("Large CNN Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
x_train.isnull().mean()<count_missing_values>
|
( X_train, y_train),(X_test, y_test)= mnist.load_data()
X_train = X_train.reshape(( X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape(( X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
def larger_model() :
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', input_shape=(28,28,1)))
model.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
model.add(Conv2D(filters=192, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.1))
model.add(Conv2D(filters=192, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2, padding='same'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model_larger = larger_model()
model_larger.summary()
|
Digit Recognizer
|
9,041,790 |
x_test.isnull().mean()<count_missing_values>
|
model_larger.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=100)
scores = model_larger.evaluate(X_test, y_test, verbose=1)
print("Larger CNN Error: %.2f%%" %(100-scores[1]*100))
|
Digit Recognizer
|
9,041,790 |
[x_train[x_train['NumberOfDependents'].isnull() ][x_train['MonthlyIncome'].notnull() ]['DebtRatio'].count() ,
x_test[x_test['NumberOfDependents'].isnull() ][x_test['MonthlyIncome'].notnull() ]['DebtRatio'].count() ]<define_variables>
|
X_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ).values.astype('float32')
X_test = X_test.reshape(-1, 28, 28, 1)
X_test = X_test.astype('float32')/255
testY = model_larger.predict_classes(X_test, verbose=1 )
|
Digit Recognizer
|
9,041,790 |
x_train[x_train['NumberOfDependents'].notnull() ][x_train['MonthlyIncome'].isnull() ][['NumberOfDependents']].median()<import_modules>
|
sub = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
sub['Label'] = testY
sub.to_csv('submission.csv',index=False )
|
Digit Recognizer
|
9,041,790 |
<feature_engineering><EOS>
|
YouTubeVideo('3JQ3hYko51Y', width=800, height=450 )
|
Digit Recognizer
|
8,921,515 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<filter>
|
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers import Input, Conv2D, BatchNormalization, Activation
from keras.layers import Add, Flatten, AveragePooling2D, Dense, Dropout
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model
|
Digit Recognizer
|
8,921,515 |
[len(x_train[x_train['NumberOfTime30-59DaysPastDueNotWorse'] == 98]),
len(x_train[x_train['NumberOfTime60-89DaysPastDueNotWorse'] == 98]),
len(x_train[x_train['NumberOfTimes90DaysLate'] == 98]),
len(x_train[x_train['NumberOfTime30-59DaysPastDueNotWorse'] == 98][x_train['NumberOfTimes90DaysLate'] == 98][x_train['NumberOfTime60-89DaysPastDueNotWorse'] == 98])]<filter>
|
train = pd.read_csv(".. /input/digit-recognizer/train.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv")
X = train.drop(columns=['label'] ).values.reshape(-1, 28, 28, 1)/ 255
y = train['label'].values
test = test.values.reshape(-1, 28, 28, 1)/ 255
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1 )
|
Digit Recognizer
|
8,921,515 |
[len(x_test[x_test['NumberOfTime30-59DaysPastDueNotWorse'] == 98]),
len(x_test[x_test['NumberOfTime60-89DaysPastDueNotWorse'] == 98]),
len(x_test[x_test['NumberOfTimes90DaysLate'] == 98]),
len(x_test[x_test['NumberOfTime30-59DaysPastDueNotWorse'] == 98][x_test['NumberOfTimes90DaysLate'] == 98][x_test['NumberOfTime60-89DaysPastDueNotWorse'] == 98])]<feature_engineering>
|
def residual_block(inputs, filters, strides=1):
y = inputs
x = Conv2D(
filters=filters,
kernel_size=3,
strides=strides,
padding='same',
)(inputs)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = Conv2D(
filters=filters,
kernel_size=3,
strides=1,
padding='same',
)(x)
x = BatchNormalization()(x)
if strides > 1:
y = Conv2D(
filters=filters,
kernel_size=3,
strides=strides,
padding='same',
)(y)
y = BatchNormalization()(y)
x = Add()([x, y])
x = Activation('relu' )(x)
return x
def resnet(input_shape, num_classes, filters, stages):
inputs = Input(shape=input_shape)
x = Conv2D(
filters=filters,
kernel_size=7,
strides=1,
padding='same',
)(inputs)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
for stage in stages:
x = residual_block(x, filters, strides=2)
for i in range(stage-1):
x = residual_block(x, filters)
filters *= 2
x = AveragePooling2D(4 )(x)
x = Flatten()(x)
x = Dropout(0.3 )(x)
x = Dense(int(filters/4), activation='relu' )(x)
outputs = Dense(num_classes, activation='softmax' )(x)
model = Model(inputs=inputs, outputs=outputs)
return model
|
Digit Recognizer
|
8,921,515 |
for dataset in combine:
dataset.loc[dataset['MonthlyIncome'].isnull() ,'DebtRatio'] = dataset.loc[dataset['MonthlyIncome'].isnull() ,'DebtRatio'] / 3915;
dataset.loc[dataset['MonthlyIncome'] == 1,'DebtRatio'] = dataset.loc[dataset['MonthlyIncome'] == 1,'DebtRatio'] / 3915;
dataset.loc[dataset['MonthlyIncome'] == 0,'DebtRatio'] = dataset.loc[dataset['MonthlyIncome'] == 0,'DebtRatio'] / 3915;<categorify>
|
def train_model(epochs, filters, stages, batch_size, visualize=False):
model = resnet(
input_shape=X[0].shape,
num_classes=np.unique(y ).shape[-1],
filters=filters,
stages=stages
)
model.compile(
loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
checkpoint = ModelCheckpoint(
filepath=f'resnet-{int(time.time())}.dhf5',
monitor='loss',
save_best_only=True
)
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.8**x)
callbacks = [checkpoint, annealer]
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1
)
datagen.fit(X)
history = model.fit_generator(
datagen.flow(X_train, y_train, batch_size=batch_size),
validation_data=(X_test, y_test),
epochs=epochs,
verbose=2,
workers=12,
callbacks=callbacks
)
if visualize:
fig, axarr = plt.subplots(1, 2, figsize=(16, 8))
axarr[0].plot(history.history['accuracy'])
axarr[0].plot(history.history['val_accuracy'])
axarr[0].set_title('Model accuracy')
axarr[0].set_ylabel('Accuracy')
axarr[0].set_xlabel('Epoch')
axarr[0].legend(['Train', 'Test'], loc='upper left')
axarr[1].plot(history.history['loss'])
axarr[1].plot(history.history['val_loss'])
axarr[1].set_title('Model loss')
axarr[1].set_ylabel('Loss')
axarr[1].set_xlabel('Epoch')
axarr[1].legend(['Train', 'Test'], loc='upper left')
plt.show()
return model
|
Digit Recognizer
|
8,921,515 |
for dataset in combine:
dataset.drop(columns = ['Unnamed: 0'], inplace = True)
dataset['RevolvingUtilizationOfUnsecuredLines'].mask(dataset['RevolvingUtilizationOfUnsecuredLines'] > 2, 2, inplace=True)
dataset['NumberOfTime60-89DaysPastDueNotWorse'].mask(dataset['NumberOfTime60-89DaysPastDueNotWorse'] > 95, 20, inplace=True)
dataset['NumberOfTime30-59DaysPastDueNotWorse'].mask(dataset['NumberOfTime30-59DaysPastDueNotWorse'] > 95, 20, inplace=True)
dataset['NumberOfTimes90DaysLate'].mask(dataset['NumberOfTimes90DaysLate'] > 95, 20, inplace=True)
dataset['90_sum'] = dataset['NumberOfTime30-59DaysPastDueNotWorse'] + dataset['NumberOfTimes90DaysLate'] + dataset['NumberOfTime60-89DaysPastDueNotWorse']
dataset['DebtRatio'].mask(dataset['DebtRatio'] > 400, 400, inplace=True)
dataset['MonthlyIncome'].replace(np.nan, 0, inplace=True)
dataset['MonthlyIncome'].replace(1, 0, inplace=True)
dataset['NumberOfDependents'].replace(np.nan, 0, inplace=True)
x_train = x_train[x_train['age'] > 20]
x_train = x_train[x_train['MonthlyIncome'] <= 1e6]
x_train = x_train[x_train['NumberRealEstateLoansOrLines'] <= 50]<split>
|
models = []
for i in range(1):
print('-------------------------')
print('Model: ', i+1)
print('-------------------------')
model = train_model(
epochs=10,
filters=64,
stages=[3, 3, 3],
batch_size=128,
visualize=True
)
models.append(model )
|
Digit Recognizer
|
8,921,515 |
result = x_train['SeriousDlqin2yrs' ]
data = x_train.drop(['SeriousDlqin2yrs'], axis = 1)
train_data, test_data, train_result, test_result = train_test_split(data, result, test_size = 0.2, stratify = result, random_state=33 )<import_modules>
|
predictions = []
for model in models:
predictions.append(model.predict(test))
predictions = np.sum(predictions, axis=0)
predictions = np.argmax(predictions, axis=1)
submission = pd.DataFrame({'ImageId': np.arange(1, 28001, 1), 'Label': predictions})
submission.to_csv('mnist_resnet_submission.csv', index=False )
|
Digit Recognizer
|
8,678,113 |
models = []
scores = []
models.append("AdaBoost")
print("AdaBoost:")
model = AdaBoostClassifier()
result = cross_val_score(model,train_data,train_result,scoring='roc_auc',cv=StratifiedKFold(n_splits=10))
print('mean score:', result.mean())
scores.append(result.mean())
models.append("LGBM")
print("LGBM:")
model = LGBMClassifier()
result = cross_val_score(model,train_data,train_result,scoring='roc_auc',cv=StratifiedKFold(n_splits=10))
print('mean score:', result.mean())
scores.append(result.mean())
models.append("XGBoost")
print("XGBoost:")
model = XGBClassifier(eval_metric = 'auc')
result = cross_val_score(model,train_data,train_result,scoring='roc_auc',cv=StratifiedKFold(n_splits=10))
print('mean score:', result.mean())
scores.append(result.mean())
models.append("GBDT")
print("GBDT:")
model = GradientBoostingClassifier()
result = cross_val_score(model,train_data,train_result,scoring='roc_auc',cv=StratifiedKFold(n_splits=10))
print('mean score:', result.mean())
scores.append(result.mean())
models.append("RandomForest")
print("RandomForest:")
model = RandomForestClassifier()
result = cross_val_score(model,train_data,train_result,scoring='roc_auc',cv=StratifiedKFold(n_splits=10))
print('mean score:', result.mean())
scores.append(result.mean())
models.append("SVM")
print("SVM:")
model = svm.SVC()
result = cross_val_score(model,train_data,train_result,scoring='roc_auc',cv=StratifiedKFold(n_splits=10))
print('mean score:', result.mean())
scores.append(result.mean())
<choose_model_class>
|
train = pd.read_csv(".. /input/train.csv")
print(train.shape)
train.head()
|
Digit Recognizer
|
8,678,113 |
model = LGBMClassifier()
model.fit(train_data, train_result)
predict_proba = model.predict_proba(test_data)[:,1]
roc_auc_score(test_result, predict_proba )<find_best_model_class>
|
test= pd.read_csv(".. /input/test.csv")
print(test.shape)
test.head()
|
Digit Recognizer
|
8,678,113 |
model = LGBMClassifier(n_estimators = 290, learning_rate = 0.025,max_depth = 6, num_leaves = 22)
model.fit(train_data, train_result)
predict_proba = model.predict_proba(test_data)[:,1]
roc_auc_score(test_result, predict_proba )<save_to_csv>
|
X_train =(train.iloc[:,1:].values ).astype('float32')
y_train = train.iloc[:,0].values.astype('int32')
X_test = test.values.astype('float32')
X = X_train
y = y_train
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.10, random_state=42 )
|
Digit Recognizer
|
8,678,113 |
x_test = x_test.drop(['SeriousDlqin2yrs'],axis=1)
y_test = model.predict_proba(x_test)[:,1]
ids = np.arange(1,101504)
res = pd.DataFrame({'Id': ids, 'Probability': y_test})
res.to_csv("submission.csv", index=False )<load_from_csv>
|
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
def standardize(x):
return(x-mean_px)/std_px
|
Digit Recognizer
|
8,678,113 |
train_df = pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-training.csv')
test_df = pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-test.csv' )<count_missing_values>
|
y_train = np_utils.to_categorical(y_train,10)
X = X.reshape(-1,28,28,1)
y = np_utils.to_categorical(y,10 )
|
Digit Recognizer
|
8,678,113 |
train_df.isnull().sum()<count_missing_values>
|
seed = 43
np.random.seed(seed )
|
Digit Recognizer
|
8,678,113 |
test_df.isnull().sum()<filter>
|
from keras.models import Sequential
from keras.layers.core import Lambda , Dense, Flatten, Dropout
from keras.callbacks import EarlyStopping
from keras.layers import BatchNormalization, Convolution2D , MaxPooling2D
|
Digit Recognizer
|
8,678,113 |
train_df.loc[train_df['age'] < 18]<remove_duplicates>
|
def get_bn_model() :
model = Sequential([
Lambda(standardize, input_shape=(28,28,1)) ,
Convolution2D(32,(3,3), activation='relu'),
BatchNormalization(axis=1),
Convolution2D(32,(3,3), activation='relu'),
MaxPooling2D() ,
BatchNormalization(axis=1),
Convolution2D(64,(3,3), activation='relu'),
BatchNormalization(axis=1),
Convolution2D(64,(3,3), activation='relu'),
MaxPooling2D() ,
Flatten() ,
BatchNormalization() ,
Dense(512, activation='relu'),
BatchNormalization() ,
Dense(10, activation='softmax')
])
model.compile(Adam() , loss='categorical_crossentropy', metrics=['accuracy'])
return model
|
Digit Recognizer
|
8,678,113 |
train_df.drop_duplicates(inplace=True )<rename_columns>
|
model= get_bn_model()
gen =ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
height_shift_range=0.08, zoom_range=0.08)
batches = gen.flow(X, y, batch_size=64)
history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=1)
|
Digit Recognizer
|
8,678,113 |
<filter><EOS>
|
X_test.shape
results = model.predict(X_test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("Aniket_Digit_pred2.csv",index=False )
|
Digit Recognizer
|
10,799,576 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_variables>
|
num_classes = 10
train_fname = '/kaggle/input/digit-recognizer/train.csv'
train_db = pd.read_csv(train_fname)
label = train_db['label'].to_numpy()
label = to_categorical(label, num_classes=num_classes)
data = train_db.drop('label', axis=1 ).to_numpy()
data = data.reshape(( data.shape[0], 28, 28, 1))
data = data.astype('float32')/ 255
x_train = data [:32000]
y_train = label[:32000]
x_valid = data [32000:]
y_valid = label[32000:]
del train_db
|
Digit Recognizer
|
10,799,576 |
k = 0
for i in train_df['NumberOfDependents']:
if i>8:
train_df['NumberOfDependents'].values[k]=8
k +=1
k = 0
for i in test_df['NumberOfDependents']:
if i>8:
test_df['NumberOfDependents'].values[k]=8
k +=1<feature_engineering>
|
train_datagen = ImageDataGenerator(rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1)
batch_size = 16
train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size )
|
Digit Recognizer
|
10,799,576 |
train_df.loc[train_df['age'] < 18, 'age'] = train_df['age'].median()<feature_engineering>
|
model = Sequential([
Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1)) ,
Conv2D(32,(3, 3), activation='relu'),
SeparableConv2D(32,(5, 5), activation='relu', padding='same'),
BatchNormalization() ,
MaxPooling2D(( 2,2)) ,
SpatialDropout2D(0.35),
Conv2D(64,(3, 3), activation='relu'),
Conv2D(64,(3, 3), activation='relu'),
SeparableConv2D(64,(5, 5), activation='relu', padding='same'),
BatchNormalization() ,
MaxPooling2D(( 2,2)) ,
SpatialDropout2D(0.35),
Flatten() ,
Dense(256, activation='relu', kernel_regularizer=l2(1e-3)) ,
Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()
|
Digit Recognizer
|
10,799,576 |
train_df['MonthlyIncome'] = train_df['MonthlyIncome'].replace(np.nan,train_df['MonthlyIncome'].mean())
test_df['MonthlyIncome'] = test_df['MonthlyIncome'].replace(np.nan,test_df['MonthlyIncome'].mean())
train_df['NumberOfDependents'] = train_df['NumberOfDependents'].replace(np.nan,train_df['NumberOfDependents'].median())
test_df['NumberOfDependents'] = test_df['NumberOfDependents'].replace(np.nan,test_df['NumberOfDependents'].median() )<split>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 30
performance = model.fit(train_generator,
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=epochs,
validation_data=(x_valid, y_valid),
callbacks=[learning_rate_reduction] )
|
Digit Recognizer
|
10,799,576 |
<save_to_csv><EOS>
|
test_fname = '/kaggle/input/digit-recognizer/test.csv'
test_db = pd.read_csv(test_fname)
x_test = test_db.to_numpy()
x_test = x_test.reshape(( x_test.shape[0], 28, 28, 1))
x_test = x_test.astype('float32')/ 255
predictions = np.argmax(model.predict(x_test), axis=1)
imageIds = pd.Series(range(1,28001), name='ImageId')
results = pd.Series(predictions , name='Label')
submission = pd.concat([imageIds, results], axis=1)
submission.to_csv("submission.csv", index=False, header=True)
|
Digit Recognizer
|
10,728,166 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<import_modules>
|
%matplotlib inline
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
Digit Recognizer
|
10,728,166 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score,roc_curve, auc
from sklearn.ensemble import RandomForestRegressor
from lightgbm import LGBMClassifier<load_from_csv>
|
df_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
df_submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv" )
|
Digit Recognizer
|
10,728,166 |
train=pd.read_csv('/kaggle/input/GiveMeSomeCredit/cs-training.csv')
train.info()
<feature_engineering>
|
print("Train File", df_train.isnull().any().sum())
print("Test File", df_test.isnull().any().sum() )
|
Digit Recognizer
|