kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completetion (string, lengths 1–182k) | comp_name (string, lengths 5–57)
---|---|---|---|
4,340,698 |
for col in cat_dataset:
cat_dataset[col] = cat_dataset[col].astype('category')
<data_type_conversions>
|
def train_classifier(Y_pseudo, params=params_4):
    preds, oof, auc_all = initialize_cv()
    print("Computing centroids and covariances for the four clusters (two per class).")
    for i in tqdm(range(magic_min, magic_max+1)):
        X, X_test, Y, idx_train, idx_test = get_data(i=i, data=data)
        auc_folds = np.array([])
        folds = StratifiedKFold(n_splits=NFOLDS, random_state=RS)
        for train_index, val_index in folds.split(X, Y):
            X_train, Y_train = X[train_index, :], Y[train_index]
            X_val, Y_val = X[val_index, :], Y[val_index]
            if Y_pseudo is None:
                params['means_init'], params['precisions_init'] = clusters_stats(X_train, Y_train)
            else:
                X_aug, Y_aug = pseudolabeling(X_train, X_test, Y_train, Y_pseudo, idx_test)
                params['means_init'], params['precisions_init'] = clusters_stats(X_aug, Y_aug.ravel())
            clf = GaussianMixture(**params)
            clf.fit(np.concatenate([X_train, X_test], axis=0))
            oof[idx_train[val_index]] = np.sum(clf.predict_proba(X_val)[:, 2:], axis=1)
            preds[idx_test] += np.sum(clf.predict_proba(X_test)[:, 2:], axis=1) / NFOLDS
            auc = roc_auc_score(Y_val, oof[idx_train[val_index]])
            auc_folds = np.append(auc_folds, auc)
        auc_all = np.append(auc_all, np.mean(auc_folds))
    report_results(oof, auc_all)
    return preds, oof, auc_all
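The helpers `initialize_cv`, `get_data`, `clusters_stats`, `pseudolabeling`, and `report_results` come from cells this row does not include. A minimal sketch of the two that shape the mixture model, inferred only from how `train_classifier` calls them (the names are kept; everything else is an assumption, not the author's code):

def initialize_cv():
    # Zeroed prediction buffers plus an empty per-subset AUC log.
    preds = np.zeros(len(test))
    oof = np.zeros(len(train))
    auc_all = np.array([])
    return preds, oof, auc_all

def clusters_stats(X, Y):
    # Two clusters per class -> four components, ordered so that
    # components 2 and 3 belong to class 1 (matching predict_proba[:, 2:]).
    means, precisions = [], []
    for label in (0, 1):
        gm = GaussianMixture(n_components=2).fit(X[Y == label])
        means.append(gm.means_)
        precisions.append(np.linalg.inv(gm.covariances_))
    return np.concatenate(means), np.concatenate(precisions)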
|
Instant Gratification
|
4,340,698 |
num_dataset = dataset.select_dtypes(exclude=['object','category']).astype('float64')<define_variables>
|
Y_pseudo, oof, auc_all = train_classifier(Y_pseudo=None)
|
Instant Gratification
|
4,340,698 |
pos = np.where(num_dataset < 0)
pos[0]<feature_engineering>
|
preds_gmm, oof_gmm, auc_gmm = train_classifier(Y_pseudo=Y_pseudo)
sub['target'] = preds_gmm
sub.to_csv('submission_gmm.csv', index=False)
|
Instant Gratification
|
4,340,698 |
num_dataset.loc[num_dataset['RemodAfterBuilt'] < 0, 'RemodAfterBuilt'] = 0
num_dataset.loc[num_dataset['SoldAfterBuilt'] < 0, 'SoldAfterBuilt'] = 0<define_variables>
|
def get_labels(X_train, Y_train, params=params_2_qda):
    X_train_0 = X_train[Y_train == 0]
    X_train_1 = X_train[Y_train == 1]
    clf_0 = GaussianMixture(**params)
    labels_0 = clf_0.fit_predict(X_train_0).reshape(-1, 1)
    clf_1 = GaussianMixture(**params)
    labels_1 = clf_1.fit_predict(X_train_1).reshape(-1, 1)
    labels_1[labels_1 == 0] = 2
    labels_1[labels_1 == 1] = 3
    X_l = np.vstack((X_train_0, X_train_1))
    Y_l = np.vstack((labels_0, labels_1))
    perm = np.random.permutation(len(X_l))
    X_l = X_l[perm]
    Y_l = Y_l[perm]
    return X_l, Y_l
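Because the class-0 clusters keep labels 0-1 and the class-1 clusters are relabeled 2-3, the training loops in this kernel recover P(y=1) by summing the per-cluster probabilities from column 2 onward:

proba = clf.predict_proba(X_val)           # shape (n_samples, 4): one column per cluster label
p_positive = np.sum(proba[:, 2:], axis=1)  # clusters 2 and 3 came from class 1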
|
Instant Gratification
|
4,340,698 |
pos = np.where(num_dataset < 0)
pos[0]<feature_engineering>
|
def train_qda(Y_pseudo, low, high, params=params_qda):
    preds, oof, auc_all = initialize_cv()
    print("Computing centroids and covariances for the four clusters (two per class).")
    for i in tqdm(range(magic_min, magic_max+1)):
        X, X_test, Y, idx_train, idx_test = get_data(i=i, data=data)
        auc_folds = np.array([])
        folds = StratifiedKFold(n_splits=NFOLDS, random_state=RS)
        for train_index, val_index in folds.split(X, Y):
            X_train, Y_train = X[train_index, :], Y[train_index]
            X_val, Y_val = X[val_index, :], Y[val_index]
            clf = QuadraticDiscriminantAnalysis(**params)
            if Y_pseudo is None:
                X_l, Y_l = get_labels(X_train, Y_train)
            else:
                X_aug, Y_aug = pseudolabeling_qda(X_train, X_test, Y_train, Y_pseudo, idx_test, low, high)
                X_l, Y_l = get_labels(X_aug, Y_aug.ravel())
            clf.fit(X_l, Y_l.ravel())
            oof[idx_train[val_index]] = np.sum(clf.predict_proba(X_val)[:, 2:], axis=1)
            preds[idx_test] += np.sum(clf.predict_proba(X_test)[:, 2:], axis=1) / NFOLDS
            auc = roc_auc_score(Y_val, oof[idx_train[val_index]])
            auc_folds = np.append(auc_folds, auc)
        auc_all = np.append(auc_all, np.mean(auc_folds))
    report_results(oof, auc_all, clf_name='QDA')
    return preds, oof, auc_all
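`pseudolabeling_qda` is also defined outside this excerpt. A minimal sketch consistent with the call above, where `low` and `high` bound the confident region (beyond the signature, the details are assumptions):

def pseudolabeling_qda(X_train, X_test, Y_train, Y_pseudo, idx_test, low, high):
    # Hard-label test rows whose pseudo-probability is confidently low or high
    # and append them to the training fold.
    p = Y_pseudo[idx_test]
    confident = (p < low) | (p > high)
    X_aug = np.vstack([X_train, X_test[confident]])
    Y_aug = np.concatenate([Y_train.ravel(), (p[confident] > high).astype(int)])
    return X_aug, Y_aug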
|
Instant Gratification
|
4,340,698 |
skew_feats = skew_feats[abs(skew_feats) > 1]
print(skew_feats)
for feat in skew_feats.index:
    num_dataset[feat] = np.log1p(num_dataset[feat])<categorify>
|
Y_pseudo = preds_gmm
for rp, low, high in zip(rp_values, low_vals, high_vals):
    params_qda = {'reg_param': rp}
    Y_pseudo, oof_qda, auc_qda = train_qda(Y_pseudo=Y_pseudo, low=low, high=high, params=params_qda)
preds_qda = Y_pseudo
sub['target'] = preds_qda
sub.to_csv('submission_qda.csv', index=False)
|
Instant Gratification
|
4,340,698 |
cat_dataset = pd.get_dummies(cat_dataset, columns=cat_dataset.columns)<define_variables>
|
preds_highest = preds_gmm
oof_highest = oof_gmm
mask = (auc_qda > auc_gmm)
print(f"The number of models where QDA's predictions are better is {sum(mask)}.")
|
Instant Gratification
|
4,340,698 |
np.where(num_dataset < 0)<feature_engineering>
|
for i in tqdm(range(magic_min, magic_max+1)):
    if mask[i]:
        _, _, _, idx_train, idx_test = get_data(i=i, data=data)
        oof_highest[idx_train] = oof_qda[idx_train]
        preds_highest[idx_test] = preds_qda[idx_test]
auc = roc_auc_score(train['target'].values, oof_highest)
print(f"The 'highest' ROC AUC score is {auc}.")
|
Instant Gratification
|
4,340,698 |
for x in num_dataset:
    num_dataset[x] = (num_dataset[x] - num_dataset[x].mean()) / num_dataset[x].std()
<count_missing_values>
|
sub['target'] = preds_highest
sub.to_csv('submission_highest.csv', index=False)
|
Instant Gratification
|
4,340,698 |
sum(np.isnan(num_dataset).any())<correct_missing_values>
|
oof_all = pd.DataFrame()
preds_all = pd.DataFrame()
oof_all['gmm'] = rankdata(oof_gmm)/len(oof_gmm)
oof_all['qda'] = rankdata(oof_qda)/len(oof_qda)
preds_all['gmm'] = rankdata(preds_gmm)/len(preds_gmm)
preds_all['qda'] = rankdata(preds_qda)/len(preds_qda)
lr = LogisticRegression()
lr.fit(oof_all.values, train['target'].values)
preds_lr = lr.predict_proba(preds_all.values)[:,1]
preds_train = lr.predict_proba(oof_all)[:,1]
auc = roc_auc_score(train['target'].values, preds_train)
print(f"The final ROC AUC score is {auc}." )
|
Instant Gratification
|
4,340,698 |
np.where(np.isnan(num_dataset))<count_missing_values>
|
sub['target'] = preds_lr
sub.to_csv('submission_lr.csv', index=False)
|
Instant Gratification
|
4,340,698 |
<prepare_x_and_y><EOS>
|
w = 0.02
mask = (preds_gmm < (0.5 + w)) & (preds_gmm > (0.5 - w))
preds = rankdata(preds_gmm)/len(preds_gmm)
preds[mask] = preds_lr[mask]
sub['target'] = preds
sub.to_csv('submission_picking.csv', index=False)
|
Instant Gratification
|
4,323,861 |
<SOS> metric: AUC Kaggle data source: instant-gratification<count_missing_values>
|
warnings.filterwarnings("ignore")
|
Instant Gratification
|
4,323,861 |
sum(X_test.isnull().sum())
X.shape<import_modules>
|
train = pd.read_csv('../input/train.csv')
train_t = train.copy()
test = pd.read_csv('../input/test.csv')
|
Instant Gratification
|
4,323,861 |
cross_val_score, \
StratifiedKFold, \
learning_curve,\
KFold,\
cross_val_predict;
sns.set(style='white', context='notebook', palette='deep');
warnings.filterwarnings(action='ignore', category=UserWarning)
<train_model>
|
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
reg_best = [  # one QDA reg_param per 'wheezy-copper-turtle-magic' subset (512 values)
    0.5, 0.2, 0.3, 0.2, 0.5, 0.1, 0.1, 0.2, 0.3, 0.5, 0.2, 0.4, 0.1, 0.3, 0.1, 0.4,
    0.3, 0.2, 0.2, 0.5, 0.1, 0.4, 0.4, 0.1, 0.5, 0.4, 0.1, 0.4, 0.4, 0.1, 0.1, 0.3,
    0.4, 0.1, 0.5, 0.2, 0.3, 0.1, 0.1, 0.5, 0.5, 0.5, 0.3, 0.5, 0.4, 0.1, 0.1, 0.1,
    0.5, 0.5, 0.5, 0.1, 0.3, 0.1, 0.1, 0.4, 0.2, 0.3, 0.1, 0.1, 0.5, 0.2, 0.4, 0.1,
    0.1, 0.1, 0.1, 0.4, 0.2, 0.1, 0.1, 0.5, 0.4, 0.1, 0.3, 0.2, 0.4, 0.1, 0.3, 0.5,
    0.1, 0.5, 0.1, 0.5, 0.1, 0.1, 0.4, 0.5, 0.4, 0.2, 0.1, 0.1, 0.4, 0.5, 0.2, 0.5,
    0.5, 0.4, 0.1, 0.5, 0.5, 0.3, 0.5, 0.2, 0.4, 0.4, 0.1, 0.4, 0.4, 0.1, 0.1, 0.5,
    0.5, 0.5, 0.1, 0.2, 0.4, 0.1, 0.4, 0.5, 0.5, 0.5, 0.2, 0.2, 0.2, 0.5, 0.1, 0.1,
    0.3, 0.5, 0.3, 0.1, 0.4, 0.1, 0.3, 0.1, 0.2, 0.5, 0.5, 0.1, 0.1, 0.1, 0.4, 0.1,
    0.5, 0.5, 0.5, 0.1, 0.5, 0.5, 0.1, 0.5, 0.5, 0.2, 0.4, 0.2, 0.1, 0.5, 0.3, 0.5,
    0.2, 0.4, 0.4, 0.5, 0.2, 0.3, 0.1, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5, 0.1, 0.5,
    0.4, 0.1, 0.4, 0.3, 0.4, 0.4, 0.3, 0.1, 0.4, 0.4, 0.2, 0.5, 0.4, 0.4, 0.2, 0.1,
    0.2, 0.5, 0.5, 0.1, 0.5, 0.3, 0.4, 0.5, 0.1, 0.5, 0.5, 0.5, 0.1, 0.1, 0.3, 0.2,
    0.5, 0.1, 0.5, 0.5, 0.4, 0.1, 0.5, 0.1, 0.5, 0.1, 0.3, 0.3, 0.1, 0.1, 0.1, 0.4,
    0.3, 0.1, 0.1, 0.4, 0.3, 0.3, 0.4, 0.5, 0.2, 0.1, 0.5, 0.5, 0.4, 0.4, 0.3, 0.1,
    0.1, 0.5, 0.1, 0.1, 0.1, 0.1, 0.3, 0.3, 0.2, 0.1, 0.5, 0.4, 0.3, 0.1, 0.3, 0.1,
    0.2, 0.4, 0.5, 0.3, 0.1, 0.1, 0.3, 0.3, 0.4, 0.4, 0.2, 0.5, 0.1, 0.5, 0.3, 0.1,
    0.2, 0.5, 0.1, 0.1, 0.5, 0.4, 0.1, 0.5, 0.5, 0.5, 0.3, 0.2, 0.4, 0.5, 0.4, 0.3,
    0.1, 0.4, 0.3, 0.2, 0.2, 0.1, 0.4, 0.4, 0.1, 0.2, 0.1, 0.5, 0.3, 0.2, 0.1, 0.2,
    0.3, 0.2, 0.5, 0.4, 0.5, 0.5, 0.1, 0.1, 0.4, 0.3, 0.3, 0.4, 0.3, 0.2, 0.5, 0.4,
    0.1, 0.1, 0.4, 0.1, 0.1, 0.5, 0.4, 0.1, 0.4, 0.5, 0.3, 0.2, 0.5, 0.4, 0.4, 0.5,
    0.1, 0.1, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.2, 0.1, 0.1, 0.1, 0.5, 0.5, 0.4,
    0.5, 0.1, 0.3, 0.5, 0.5, 0.3, 0.5, 0.1, 0.3, 0.1, 0.4, 0.3, 0.5, 0.5, 0.5, 0.4,
    0.2, 0.5, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1, 0.5, 0.4, 0.3, 0.1, 0.5, 0.5, 0.2, 0.3,
    0.5, 0.5, 0.1, 0.1, 0.1, 0.5, 0.3, 0.1, 0.4, 0.1, 0.1, 0.5, 0.5, 0.4, 0.1, 0.5,
    0.4, 0.2, 0.5, 0.1, 0.4, 0.1, 0.1, 0.1, 0.4, 0.2, 0.1, 0.2, 0.2, 0.5, 0.4, 0.1,
    0.1, 0.1, 0.4, 0.5, 0.4, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.3, 0.5, 0.3, 0.5, 0.5,
    0.5, 0.5, 0.5, 0.3, 0.5, 0.5, 0.1, 0.5, 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.5, 0.5,
    0.1, 0.1, 0.4, 0.3, 0.1, 0.2, 0.1, 0.1, 0.1, 0.3, 0.5, 0.2, 0.1, 0.3, 0.2, 0.4,
    0.4, 0.2, 0.1, 0.3, 0.1, 0.1, 0.4, 0.1, 0.2, 0.4, 0.5, 0.3, 0.1, 0.1, 0.5, 0.5,
    0.5, 0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.4, 0.1, 0.4, 0.2, 0.1, 0.1, 0.4, 0.1, 0.5,
    0.2, 0.1, 0.1, 0.3, 0.5, 0.1, 0.5, 0.5, 0.1, 0.1, 0.2, 0.1, 0.1, 0.1, 0.2, 0.3]
|
Instant Gratification
|
4,323,861 |
def train_model(estimator=None, X_train=None, y_train=None, X_cv=None, y_cv=None, scoring=None):
    m = estimator
    if scoring == 'neg_mean_squared_error':
        m.fit(X_train, np.log1p(y_train))
    if scoring == 'neg_mean_squared_log_error':
        m.fit(X_train, y_train)
    pred = m.predict(X_cv)
    if scoring == 'neg_mean_squared_error':
        score = np.sqrt(mean_squared_error(np.log1p(y_cv), pred))
    if scoring == 'neg_mean_squared_log_error':
        score = np.sqrt(mean_squared_log_error(y_cv, pred))
    return score, m<save_to_csv>
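A hypothetical call, only to make the scoring-string contract concrete (the Lasso model and the split variables are illustrative, not from this kernel):

score, fitted = train_model(estimator=Lasso(alpha=0.0005),
                            X_train=X_tr, y_train=y_tr,
                            X_cv=X_val, y_cv=y_val,
                            scoring='neg_mean_squared_error')  # fits and scores on log1p targets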
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
    data2 = VarianceThreshold(threshold=2).fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(reg_best[i])
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
|
Instant Gratification
|
4,323,861 |
def gen_submission(csvname=None, X_test=None, models=None, scoring=None):
    if scoring == 'neg_mean_squared_error':
        pred = np.expm1(models.predict(X_test))
    if scoring == 'neg_mean_squared_log_error':
        pred = models.predict(X_test)
    result = pd.concat([ids, pd.Series(pred).astype('float64')], axis=1)
    result.columns = ['Id', 'SalePrice']
    result.to_csv(csvname + r'.csv', index=False)<data_type_conversions>
|
auc = roc_auc_score(train_t['target'], oof)
print(f'AUC: {auc:.5}')
|
Instant Gratification
|
4,323,861 |
X = X.astype('float64')<count_missing_values>
|
train.loc[oof > 0.99, 'target'] = 1
train.loc[oof < 0.01, 'target'] = 0
|
Instant Gratification
|
4,323,861 |
sum(np.isnan(X).all())<count_values>
|
oof_ls = np.zeros(len(train))
pred_te_ls = np.zeros(len(test))
for k in tqdm_notebook(range(512)):
    train2 = train[train['wheezy-copper-turtle-magic']==k]
    train2p = train2.copy(); idx1 = train2.index
    test2 = test[test['wheezy-copper-turtle-magic']==k]
    test2['target'] = -1
    train2p = pd.concat([train2, test2], axis=0)
    train2p.reset_index(drop=True, inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2p[cols])
    train4p = sel.transform(train2p[cols])
    train4 = sel.transform(train2[cols])
    test4 = sel.transform(test2[cols])
    skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
    for train_index, test_index in skf.split(train4p, train2p['target']):
        test_index3 = test_index[test_index < len(train4)]
        clf = LabelSpreading(gamma=0.01, kernel='rbf', max_iter=10, n_jobs=-1)
        clf.fit(train4p[train_index,:], train2p.loc[train_index]['target'])
        oof_ls[idx1[test_index3]] = clf.predict_proba(train4[test_index3,:])[:,1]
        pred_te_ls[test2.index] += clf.predict_proba(test4)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof_ls)
print('CV for LabelSpreading =', round(auc, 5))
|
Instant Gratification
|
4,323,861 |
sum(np.isinf(X ).any() )<drop_column>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)):
    train2 = train[train['wheezy-copper-turtle-magic']==i]
    test2 = test[test['wheezy-copper-turtle-magic']==i]
    idx1 = train2.index; idx2 = test2.index
    train2.reset_index(drop=True, inplace=True)
    data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
    data2 = VarianceThreshold(threshold=2).fit_transform(data[cols])
    train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
    train4 = np.hstack([train3, np.array([oof_ls[idx1]]).T])
    test4 = np.hstack([test3, np.array([pred_te_ls[idx2]]).T])
    skf = StratifiedKFold(n_splits=11, random_state=42)
    for train_index, test_index in skf.split(train4, train2['target']):
        clf = QuadraticDiscriminantAnalysis(reg_best[i])
        clf.fit(train4[train_index,:], train2.loc[train_index]['target'])
        oof[idx1[test_index]] = clf.predict_proba(train4[test_index,:])[:,1]
        preds[idx2] += clf.predict_proba(test4)[:,1] / skf.n_splits
auc = roc_auc_score(train_t['target'], oof)
print(f'AUC: {auc:.5}')
|
Instant Gratification
|
4,323,861 |
<split><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = preds
sub.to_csv('submission.csv', index=False)
|
Instant Gratification
|
4,365,744 |
<SOS> metric: AUC Kaggle data source: instant-gratification<compute_train_metric>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
print(train.shape, test.shape)
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
for itr in range(4):
test['target'] = preds
test.loc[test['target'] > 0.955, 'target'] = 1
test.loc[test['target'] < 0.045, 'target'] = 0
usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
    new_train = pd.concat([train, usefull_test]).reset_index(drop=True)
print(usefull_test.shape[0], "Test Records added for iteration : ", itr)
new_train.loc[oof > 0.995, 'target'] = 1
new_train.loc[oof < 0.005, 'target'] = 0
oof2 = np.zeros(len(train))
preds = np.zeros(len(test))
    for i in tqdm_notebook(range(512)):
train2 = new_train[new_train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train[train['wheezy-copper-turtle-magic']==i].index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
        skf = StratifiedKFold(n_splits=11, random_state=int(time.time()))
for train_index, test_index in skf.split(train2, train2['target']):
oof_test_index = [t for t in test_index if t < len(idx1)]
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
            if len(oof_test_index) > 0:
oof2[idx1[oof_test_index]] = clf.predict_proba(train3[oof_test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof2)
print(f'AUC: {auc:.5}')
sub1 = pd.read_csv('../input/sample_submission.csv')
sub1['target'] = preds
|
Instant Gratification
|
4,365,744 |
scores = -1 * cross_val_score(LR_lasso, X, np.log1p(y), scoring=scoring, cv=5)
np.sqrt(scores).mean()<choose_model_class>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
print(train.shape, test.shape)
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
for itr in range(4):
test['target'] = preds
test.loc[test['target'] > 0.94, 'target'] = 1
test.loc[test['target'] < 0.06, 'target'] = 0
usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
    new_train = pd.concat([train, usefull_test]).reset_index(drop=True)
print(usefull_test.shape[0], "Test Records added for iteration : ", itr)
new_train.loc[oof > 0.98, 'target'] = 1
new_train.loc[oof < 0.02, 'target'] = 0
oof2 = np.zeros(len(train))
preds = np.zeros(len(test))
    for i in tqdm_notebook(range(512)):
train2 = new_train[new_train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train[train['wheezy-copper-turtle-magic']==i].index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
oof_test_index = [t for t in test_index if t < len(idx1)]
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
            if len(oof_test_index) > 0:
oof2[idx1[oof_test_index]] = clf.predict_proba(train3[oof_test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof2)
print(f'AUC: {auc:.5}')
sub2 = pd.read_csv('../input/sample_submission.csv')
sub2['target'] = preds
|
Instant Gratification
|
4,365,744 |
LR = Lasso()
kf = StratifiedKFold(n_splits=10,shuffle=True)
lasso_param_grid = {
"alpha":[0.001,0.0005,0.0007] ,
"max_iter":[1000,800,500],
"tol":[0.001,0.002,0.005,0.01,0.02,0.04,],
}
rcv_param_grid = {}
gsLR = GridSearchCV(LR,param_grid = lasso_param_grid, cv=5,n_jobs= -1, verbose = 1)
gsLR.scoring = scoring
gsLR.fit(X,np.log1p(y))
bestLR= gsLR.best_estimator_
print(bestLR)
print(np.sqrt(-gsLR.best_score_))<compute_train_metric>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
print(train.shape, test.shape)
oof = np.zeros(len(train))
preds = np.zeros(len(test))
params = [{'reg_param': [0.1, 0.2, 0.3, 0.4, 0.5]}]
reg_params = np.zeros(512)
for i in tqdm_notebook(range(512)):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
qda = QuadraticDiscriminantAnalysis()
clf = GridSearchCV(qda, params, cv=4)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
reg_params[i] = clf.best_params_['reg_param']
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
for itr in range(10):
test['target'] = preds
test.loc[test['target'] > 0.955, 'target'] = 1
test.loc[test['target'] < 0.045, 'target'] = 0
usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
    new_train = pd.concat([train, usefull_test]).reset_index(drop=True)
print(usefull_test.shape[0], "Test Records added for iteration : ", itr)
new_train.loc[oof > 0.995, 'target'] = 1
new_train.loc[oof < 0.005, 'target'] = 0
oof2 = np.zeros(len(train))
preds = np.zeros(len(test))
    for i in tqdm_notebook(range(512)):
train2 = new_train[new_train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train[train['wheezy-copper-turtle-magic']==i].index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
        skf = StratifiedKFold(n_splits=11, random_state=int(time.time()))
for train_index, test_index in skf.split(train2, train2['target']):
oof_test_index = [t for t in test_index if t < len(idx1)]
clf = QuadraticDiscriminantAnalysis(reg_params[i])
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
            if len(oof_test_index) > 0:
oof2[idx1[oof_test_index]] = clf.predict_proba(train3[oof_test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof2)
print(f'AUC: {auc:.5}')
sub3 = pd.read_csv('../input/sample_submission.csv')
sub3['target'] = preds
|
Instant Gratification
|
4,365,744 |
def rmse_cv(model):
    rmse = np.sqrt(-cross_val_score(model, X, np.log1p(y), scoring="neg_mean_squared_error", cv=5))
    return rmse<import_modules>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub.head()
|
Instant Gratification
|
4,365,744 |
print(os.listdir(".. /input"))
warnings.filterwarnings("ignore")
<define_variables>
|
sub['target'] = 1/3*sub1.target + 1/3*sub2.target + 1/3*sub3.target
|
Instant Gratification
|
4,365,744 |
<load_from_csv><EOS>
|
sub.to_csv('submission.csv', index = False)
sub.head()
|
Instant Gratification
|
3,961,870 |
<SOS> metric: AUC Kaggle data source: instant-gratification<load_from_csv>
|
import numpy as np, pandas as pd, os
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import roc_auc_score
from tqdm import tqdm, tqdm_notebook
from sklearn.covariance import EmpiricalCovariance
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import sympy
from sklearn import svm, neighbors, linear_model, neural_network
from xgboost import XGBClassifier
from sklearn.covariance import *
from sklearn.utils.validation import check_random_state
from sklearn.mixture import *
from sklearn.cluster import *
|
Instant Gratification
|
3,961,870 |
print("Lecture des fichiers test.csv")
test = pd.read_csv(ROOT_DIR+"test.csv",sep=',')
print(test.shape )<load_from_csv>
|
%time
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
train.head()
|
Instant Gratification
|
3,961,870 |
print("Lecture des fichiers gender_submission.csv")
submit = pd.read_csv(ROOT_DIR+"gender_submission.csv",sep=',')
print(submit.shape )<concatenate>
|
warnings.filterwarnings('ignore')
|
Instant Gratification
|
3,961,870 |
print("Concatenation train + test")
train['X'] = 'X'
test['X'] = 'Y'
big = pd.concat([train,test] )<feature_engineering>
|
def dist(array, centre):
    # Squared Euclidean distance between a sample and a cluster centre.
    x = float(0)
    for i in range(len(array)):
        x += (array[i] - centre[i]) * (array[i] - centre[i])
    return x
def get_c(data):
    # Crude cluster "centre": +1 or -1 per feature, by the sign of the feature mean.
    data.drop('labels', axis=1, inplace=True)
    centre = [1] * data.shape[1]
    for i in range(data.shape[1]):
        if data[i].mean() < 0:
            centre[i] = -1
    return centre
def my_min(a, b, c):
    # Minimum of three values.
    if a < b:
        if a < c: return a
        return c
    if b < c: return b
    return c
|
Instant Gratification
|
3,961,870 |
big['XName'] = big['Name'].apply(lambda x: str(x)[0:str(x).find(',')] if str(x).find(',') != -1 else x)
big['TName'] = big['Name'].apply(lambda x: str(x)[str(x).find(',')+2:str(x).find('.')+1:] if str(x).find('.') != -1 else x)
big['XCabin'] = big['Cabin'].apply(lambda x: 'U' if (x is np.nan or x != x) else str(x)[0])
big['LTick'] = big['Ticket'].apply(lambda x: str(x)[0:str(x).find(' ')] if str(x).find(' ') != -1 else ' ')
K = big.groupby(['Ticket']).groups
for name, group in K.items():
    if len(group) > 1:
        CN = list(set([str(x)[0] for x in big['Cabin'].iloc[group]]) - set(['n']))
        if len(CN) == 0:
            big['XCabin'].iloc[group] = 'U'
        else:
            big['XCabin'].iloc[group] = CN[0]<feature_engineering>
|
def classify(data, val, test, y):
data = pd.DataFrame(train3)
data['target'] = y
zero = data[data['target'] == 1]
one = data[data['target'] == 0]
zero.drop('target', axis=1, inplace=True)
one.drop('target', axis=1, inplace=True)
clf = KMeans(n_clusters=3)
labels = clf.fit_predict(zero)
zero['labels'] = labels
    zero_0 = zero[zero['labels'] == 0]
    zero_1 = zero[zero['labels'] == 1]
    zero_2 = zero[zero['labels'] == 2]
clf = KMeans(n_clusters=3)
labels = clf.fit_predict(one)
one['labels'] = labels
    one_0 = one[one['labels'] == 0]
    one_1 = one[one['labels'] == 1]
    one_2 = one[one['labels'] == 2]
c_z_0 = get_c(zero_0)
c_z_1 = get_c(zero_1)
c_z_2 = get_c(zero_2)
c_o_0 = get_c(one_0)
c_o_1 = get_c(one_1)
c_o_2 = get_c(one_2)
pred_val = [0]*val.shape[0]
for i in range(val.shape[0]):
array = val.loc[i]
dist0_0 = dist(array, c_z_0)
dist0_1 = dist(array, c_z_1)
dist0_2 = dist(array, c_z_2)
dist1_0 = dist(array, c_o_0)
dist1_1 = dist(array, c_o_1)
dist1_2 = dist(array, c_o_2)
        aggr = (dist0_0 + dist0_1 + dist0_2 + dist1_0 + dist1_1 + dist1_2) / 3
        dist1 = (dist1_0 + dist1_1 + dist1_2) / 3
        dist0 = (dist0_0 + dist0_1 + dist0_2) / 3
        pred_val[i] = 1 - 1/np.exp(dist1/aggr)
pred_test = [0]*test.shape[0]
for i in range(test.shape[0]):
array = test.loc[i]
dist0_0 = dist(array, c_z_0)
dist0_1 = dist(array, c_z_1)
dist0_2 = dist(array, c_z_2)
dist1_0 = dist(array, c_o_0)
dist1_1 = dist(array, c_o_1)
dist1_2 = dist(array, c_o_2)
        aggr = (dist0_0 + dist0_1 + dist0_2 + dist1_0 + dist1_1 + dist1_2) / 4
        dist1 = (dist1_0 + dist1_1 + dist1_2) / 3
        dist0 = (dist0_0 + dist0_1 + dist0_2) / 3
        pred_test[i] = 1 - 1/np.exp(dist1/aggr)
return np.array(pred_val), np.array(pred_test)
|
Instant Gratification
|
3,961,870 |
big['XFam'] = big['SibSp'] + big['Parch'] + 1
big['XFam'] = np.log1p((big['XFam'] - big['XFam'].mean()) / big['XFam'].std())<feature_engineering>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
cols = [c for c in train.columns if c not in ['id', 'target']]
for k in tqdm_notebook(range(512)):
train2 = train[train['wheezy-copper-turtle-magic']==k]
test2 = test[test['wheezy-copper-turtle-magic']==k]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
    data2 = VarianceThreshold(threshold=2).fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
    for i, (train_index, test_index) in enumerate(skf.split(train3, train2['target'])):
oof[idx1[test_index]], test_pred = classify(pd.DataFrame(train3[train_index,:]), pd.DataFrame(train3[test_index,:]), pd.DataFrame(test3), train2.loc[train_index]['target'])
preds[idx2] += test_pred / skf.n_splits
print(roc_auc_score(train2.loc[test_index]['target'], oof[idx1[test_index]]))
if k==5: break
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
|
Instant Gratification
|
4,311,731 |
big['Last_Name'] = big['Name'].apply(lambda x: str.split(x, ",")[0])
big['Fare'].fillna(big['Fare'].mean(), inplace=True)
DEFAULT_SURVIVAL_VALUE = 0.5
big['Family_Survival'] = DEFAULT_SURVIVAL_VALUE
for grp, grp_df in big[['Survived', 'Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId',
                        'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
    if len(grp_df) != 1:
        for ind, row in grp_df.iterrows():
            smax = grp_df.drop(ind)['Survived'].max()
            smin = grp_df.drop(ind)['Survived'].min()
            passID = row['PassengerId']
            if smax == 1.0:
                big.loc[big['PassengerId'] == passID, 'Family_Survival'] = 1
            elif smin == 0.0:
                big.loc[big['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family survival information:", big.loc[big['Family_Survival']!=0.5].shape[0])<groupby>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
|
Instant Gratification
|
4,311,731 |
for _, grp_df in big.groupby('Ticket'):
    if len(grp_df) != 1:
        for ind, row in grp_df.iterrows():
            if (row['Family_Survival'] == 0) | (row['Family_Survival'] == 0.5):
                smax = grp_df.drop(ind)['Survived'].max()
                smin = grp_df.drop(ind)['Survived'].min()
                passID = row['PassengerId']
                if smax == 1.0:
                    big.loc[big['PassengerId'] == passID, 'Family_Survival'] = 1
                elif smin == 0.0:
                    big.loc[big['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family/group survival information: " + str(big[big['Family_Survival']!=0.5].shape[0]))<drop_column>
|
RANDOM_SEED = 4123
cols = [
c for c in train.columns
if c not in ['id', 'target', 'wheezy-copper-turtle-magic']
]
def get_mean_cov(x, y):
    model = GraphicalLasso()
    ones = (y == 1).astype(bool)
    x2 = x[ones]
    model.fit(x2)
    p1 = model.precision_
    m1 = model.location_
    onesb = (y == 0).astype(bool)
    x2b = x[onesb]
    model.fit(x2b)
    p2 = model.precision_
    m2 = model.location_
    ms = np.stack([m1, m2])
    ps = np.stack([p1, p2])
    return ms, ps
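In the training loop below, these class-conditional means and precision matrices seed a two-component GaussianMixture that is fit on train and test together; condensed here for orientation (component 0 corresponds to y == 1 because m1/p1 are stacked first):

ms, ps = get_mean_cov(train3, train2['target'].values)
gm = GaussianMixture(n_components=2, covariance_type='full',
                     means_init=ms, precisions_init=ps, random_state=RANDOM_SEED)
gm.fit(np.concatenate([train3, test3], axis=0))  # semi-supervised fit on train + test
p_one = gm.predict_proba(test3)[:, 0]            # column 0 <-> the y == 1 component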
|
Instant Gratification
|
4,311,731 |
del big['Ticket'], big['Cabin'], big['Name'], big['XName'], big['Last_Name']<count_values>
|
%%time
SKIP_COMMIT = True
if SKIP_COMMIT:
    sub = pd.read_csv('../input/sample_submission.csv')
    if sub.shape[0] < 200000:
        sub = pd.read_csv('../input/sample_submission.csv')
sub.to_csv('submission.csv', index=False)
raise ValueError('Stop!!!')
oof_nusvc = np.zeros(len(train))
preds_nusvc = np.zeros(len(test))
oof_nb= np.zeros(len(train))
preds_nb = np.zeros(len(test))
oof_lr = np.zeros(len(train))
preds_lr = np.zeros(len(test))
oof_qda = np.zeros(len(train))
preds_qda = np.zeros(len(test))
oof_lp = np.zeros(len(train))
preds_lp = np.zeros(len(test))
oof_lgbm = np.zeros(len(train))
preds_lgbm = np.zeros(len(test))
oof_gm = np.zeros(len(train))
preds_gm = np.zeros(len(test))
oof_rf = np.zeros(len(train))
preds_rf = np.zeros(len(test))
params_lgbm_1 = {
'boosting_type': 'gbdt',
'objective': 'xentropy',
'metric': ['auc'],
'num_leaves': 31,
'learning_rate': 0.5,
'feature_fraction': 1.0,
'bagging_fraction': 1.0,
'bagging_freq': 18,
'num_threads': 8,
'lambda_l2': 5.0,
'max_bin': 3
}
for i in range(512):
print(i, end=' ')
train2 = train[train['wheezy-copper-turtle-magic']==i]
idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx2 = test2.index
data = pd.concat(
[
train2,
test2
],
axis=0
)
train2.reset_index(drop=True, inplace=True)
train_size = train2.shape[0]
sel = VarianceThreshold(threshold=1.5)
tmp = sel.fit_transform(
data[cols]
)
train3 = tmp[:train_size, :]
test3 = tmp[train_size:, :]
ss = StandardScaler()
tmp_scaled = ss.fit_transform(tmp)
train3_scaled = tmp_scaled[:train_size, :]
test3_scaled = tmp_scaled[train_size:, :]
poly = PolynomialFeatures(degree=2)
tmp_poly = poly.fit_transform(tmp_scaled)
train3_poly = tmp_poly[:train_size, :]
test3_poly = tmp_poly[train_size:, :]
gm_clf_4 = mixture.GaussianMixture(
n_components=4,
random_state=RANDOM_SEED
)
    gm_tmp_4 = gm_clf_4.fit_predict(tmp).reshape(-1, 1)
le_4 = OneHotEncoder()
    gm_tmp_4 = le_4.fit_transform(gm_tmp_4).todense()
gm_train3_4 = gm_tmp_4[:train_size, :]
gm_test3_4 = gm_tmp_4[train_size:, :]
gm_clf_6 = mixture.GaussianMixture(
n_components=6,
random_state=RANDOM_SEED
)
    gm_tmp_6 = gm_clf_6.fit_predict(tmp).reshape(-1, 1)
le_6 = OneHotEncoder()
    gm_tmp_6 = le_6.fit_transform(gm_tmp_6).todense()
gm_train3_6 = gm_tmp_6[:train_size, :]
gm_test3_6 = gm_tmp_6[train_size:, :]
skf = StratifiedKFold(
n_splits=11,
random_state=RANDOM_SEED,
shuffle=True
)
for train_index, test_index in skf.split(train3, train2['target']):
train_train_index, train_val_index = train_test_split(
train_index,
test_size=0.3,
random_state=RANDOM_SEED
)
train_dataset = lgb.Dataset(
np.hstack(
(
train3_scaled[train_train_index,:],
                gm_tmp_4[:train_size, :][train_train_index, :].tolist(),
                gm_tmp_6[:train_size, :][train_train_index, :].tolist(),
train3_poly[train_train_index,:]
)
),
train2.loc[train_train_index]['target'],
free_raw_data=False
)
valid_dataset = lgb.Dataset(
np.hstack(
(
train3_scaled[train_val_index,:],
                gm_tmp_4[:train_size, :][train_val_index, :].tolist(),
                gm_tmp_6[:train_size, :][train_val_index, :].tolist(),
train3_poly[train_val_index,:]
)
),
train2.loc[train_val_index]['target'],
free_raw_data=False
)
gm = lgb.train(
params_lgbm_1,
train_dataset,
num_boost_round=1000,
early_stopping_rounds=20,
valid_sets=(train_dataset, valid_dataset),
valid_names=('train', 'valid'),
            feature_name=[str(l) for l in range(
np.hstack(
(
train3_scaled[test_index,:],
                gm_tmp_4[:train_size, :][test_index, :].tolist(),
                gm_tmp_6[:train_size, :][test_index, :].tolist(),
train3_poly[test_index,:]
)
).shape[1]
)],
            categorical_feature=[str(l) for l in
range(
train3_scaled.shape[1],
train3_scaled.shape[1] + gm_tmp_4.shape[1] + gm_tmp_6.shape[1]
)
],
verbose_eval=0
)
oof_lgbm[idx1[test_index]] = gm.predict(
np.hstack(
(
train3_scaled[test_index,:],
                gm_tmp_4[:train_size, :][test_index, :].tolist(),
                gm_tmp_6[:train_size, :][test_index, :].tolist(),
train3_poly[test_index,:]
)
)
)
preds_lgbm[idx2] += gm.predict(
np.hstack(
(
test3_scaled,
                gm_test3_4.tolist(),
                gm_test3_6.tolist(),
test3_poly
)
)
        ) / skf.n_splits
ms, ps = get_mean_cov(
train3[train_index, :],
train2.loc[train_index]['target'].values
)
gm = mixture.GaussianMixture(
n_components=2,
init_params='random',
covariance_type='full',
tol=0.001,
reg_covar=0.001,
max_iter=100,
n_init=1,
means_init=ms,
precisions_init=ps,
random_state=RANDOM_SEED
)
gm.fit(tmp)
oof_gm[idx1[test_index]] = gm.predict_proba(
train3[test_index,:]
)[:, 0]
preds_gm[idx2] += gm.predict_proba(
test3
)[:, 0] / skf.n_splits
lp = LabelPropagation(
kernel='rbf',
gamma=0.15301581563198507,
n_jobs=-1
)
lp.fit(
train3_scaled[train_index,:],
train2.loc[train_index]['target']
)
oof_lp[idx1[test_index]] = lp.predict_proba(
train3_scaled[test_index, :]
)[:,1]
preds_lp[idx2] += lp.predict_proba(
test3_scaled
)[:,1] / skf.n_splits
clf = NuSVC(
probability=True,
kernel='poly',
degree=2,
gamma='auto',
random_state=RANDOM_SEED,
nu=0.27312143533915767,
coef0=0.4690615598786931
)
clf.fit(
np.hstack(
(
train3_scaled[train_index,:],
gm_train3_4[train_index, :],
gm_train3_6[train_index, :]
)
),
train2.loc[train_index]['target']
)
oof_nusvc[idx1[test_index]] = clf.predict_proba(
np.hstack(
(
train3_scaled[test_index,:],
gm_train3_4[test_index, :],
gm_train3_6[test_index, :]
)
)
)[:,1]
preds_nusvc[idx2] += clf.predict_proba(
np.hstack(
(
test3_scaled,
gm_test3_4,
gm_test3_6
)
)
)[:,1] / skf.n_splits
clf = RandomForestClassifier(
max_depth=4,
n_jobs=-1,
n_estimators=20
)
clf.fit(
np.hstack(
(
train3_scaled[train_index,:],
gm_train3_4[train_index, :],
gm_train3_6[train_index, :]
)
),
train2.loc[train_index]['target']
)
oof_rf[idx1[test_index]] = clf.predict_proba(
np.hstack(
(
train3_scaled[test_index,:],
gm_train3_4[test_index, :],
gm_train3_6[test_index, :]
)
)
)[:,1]
preds_rf[idx2] += clf.predict_proba(
np.hstack(
(
test3_scaled,
gm_test3_4,
gm_test3_6
)
)
)[:,1] / skf.n_splits
clf = QuadraticDiscriminantAnalysis(
reg_param=0.5674164995882528
)
clf.fit(
train3[train_index,:],
train2.loc[train_index]['target']
)
oof_qda[idx1[test_index]] += clf.predict_proba(
train3[test_index, :]
)[:,1]
preds_qda[idx2] += clf.predict_proba(
test3
)[:,1] / skf.n_splits
clf = linear_model.LogisticRegression(
solver='saga',
penalty='l2',
C=0.01,
tol=0.001,
random_state=RANDOM_SEED
)
clf.fit(
train3_poly[train_index,:],
train2.loc[train_index]['target']
)
oof_lr[idx1[test_index]] = clf.predict_proba(
train3_poly[test_index,:]
)[:,1]
preds_lr[idx2] += clf.predict_proba(
test3_poly
)[:,1] / skf.n_splits
clf = GaussianNB()
clf.fit(
np.hstack(
(
train3_scaled[train_index,:],
gm_train3_6[train_index, :],
gm_train3_4[train_index, :]
)
),
train2.loc[train_index]['target']
)
oof_nb[idx1[test_index]] = clf.predict_proba(
np.hstack(
(
train3_scaled[test_index,:],
gm_train3_6[test_index, :],
gm_train3_4[test_index, :]
)
)
)[:,1]
preds_nb[idx2] += clf.predict_proba(
np.hstack(
(
test3_scaled,
gm_test3_6,
gm_test3_4
)
)
)[:,1] / skf.n_splits
print('svcnu', roc_auc_score(train['target'], oof_nusvc))
print('gm', roc_auc_score(train['target'], oof_gm))
print('qda', roc_auc_score(train['target'], oof_qda))
print('log reg poly', roc_auc_score(train['target'], oof_lr))
print('gnb', roc_auc_score(train['target'], oof_nb))
print('lp', roc_auc_score(train['target'], oof_lp))
print('lgbm', roc_auc_score(train['target'], oof_lgbm))
print('rf', roc_auc_score(train['target'], oof_rf))
oof_qda = oof_qda.reshape(-1, 1)
preds_qda = preds_qda.reshape(-1, 1)
oof_lr = oof_lr.reshape(-1, 1)
preds_lr = preds_lr.reshape(-1, 1)
oof_nusvc = oof_nusvc.reshape(-1, 1)
preds_nusvc = preds_nusvc.reshape(-1, 1)
oof_nb = oof_nb.reshape(-1, 1)
preds_nb = preds_nb.reshape(-1, 1)
oof_lp = oof_lp.reshape(-1, 1)
preds_lp = preds_lp.reshape(-1, 1)
oof_gm = oof_gm.reshape(-1, 1)
preds_gm = preds_gm.reshape(-1, 1)
oof_lgbm = oof_lgbm.reshape(-1, 1)
preds_lgbm = preds_lgbm.reshape(-1, 1)
oof_rf = oof_rf.reshape(-1, 1)
preds_rf = preds_rf.reshape(-1, 1)
tr_2 = np.concatenate(
(
oof_qda,
oof_nusvc,
oof_lr,
oof_nb,
oof_lp,
oof_gm,
oof_lgbm,
oof_rf
),
axis=1
)
te_2 = np.concatenate(
(
preds_qda,
preds_nusvc,
preds_lr,
preds_nb,
preds_lp,
preds_gm,
preds_lgbm,
preds_rf
),
axis=1
)
print(np.corrcoef(tr_2, rowvar=False))
params = {
'boosting_type': 'gbdt',
'objective': 'xentropy',
'metric': ['auc'],
'num_leaves': 3,
'learning_rate': 0.1,
'feature_fraction': 0.4,
'bagging_fraction': 0.4,
'bagging_freq': 5,
'num_threads': 8
}
params.update(
{
'bagging_fraction': 0.9687497922020039,
'bagging_freq': 100,
'feature_fraction': 0.7578027095458152,
'lambda_l2': 4.871836452096843,
'learning_rate': 0.41230192513715164,
'max_bin': 20,
'num_leaves': 8
}
)
oof_boosting_2_bad_cv = np.zeros(train.shape[0])
pred_te_boosting_2_bad_cv = np.zeros(test.shape[0])
train2 = train.copy()
train2.reset_index(drop=True,inplace=True)
skf = StratifiedKFold(
n_splits=11,
random_state=RANDOM_SEED,
shuffle=True
)
for train_index, test_index in skf.split(tr_2, train2['target']):
train_dataset = lgb.Dataset(
np.hstack(
(
tr_2[train_index, :],
tr_2[train_index, :] ** 2.587508645172711
)
),
train2['target'][train_index],
free_raw_data=False
)
valid_dataset = lgb.Dataset(
np.hstack(
(
tr_2[test_index, :] ,
tr_2[test_index, :] ** 2.587508645172711
)
),
train2['target'][test_index],
free_raw_data=False
)
gbm = lgb.train(
params,
train_dataset,
num_boost_round=1000,
early_stopping_rounds=100,
valid_sets=(train_dataset, valid_dataset),
valid_names=('train', 'valid'),
verbose_eval=100
)
oof_boosting_2_bad_cv[test_index] = gbm.predict(
np.hstack(
(
tr_2[test_index, :],
tr_2[test_index, :] ** 2.587508645172711
)
)
)
pred_te_boosting_2_bad_cv += gbm.predict(
np.hstack(
(
te_2,
te_2 ** 2.587508645172711
)
)
    ) / skf.n_splits
print('gnb', roc_auc_score(train['target'], oof_boosting_2_bad_cv))
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = pred_te_boosting_2_bad_cv
sub.to_csv('submission.csv', index=False)
|
Instant Gratification
|
4,239,340 |
print(big['TName'].value_counts())
big['XWho'] = big['TName']
for i in [ 'Master.', 'Sir.', 'Don.', 'Lady.', 'Dona.', 'the Countess.', 'Mme.' ]:
big['XWho'][big['TName'] == i] = "High."
for i in [ 'Col.', 'Major.', 'Capt.' ]:
big['XWho'][big['TName'] == i] = "Mil."
for i in [ 'Mr.', 'Dr.', 'Rev.' ]:
big['XWho'][big['TName'] == i] = "Mr."
for i in [ 'Mrs.', 'Ms.', 'Mlle.', 'Miss.' ]:
big['XWho'][big['TName'] == i] = "Miss."
big['XWho'][~big['TName'].isin([ 'Sir.', 'Don.', 'Lady.', 'Dona.', 'the Countess.', 'Col.',
'Major.', 'Capt.', 'Mr.', 'Master.', 'Dr.', 'Rev.', 'Mrs.',
'Ms.', 'Mlle.', 'Mme.', 'Miss.' ])] = "Oth."
print(big['XWho'].value_counts())<categorify>
|
def get_mean_cov(x, y):
    model = GraphicalLasso()
    ones = (y == 1).astype(bool)
    x2 = x[ones]
    model.fit(x2)
    p1 = model.precision_
    m1 = model.location_
    onesb = (y == 0).astype(bool)
    x2b = x[onesb]
    model.fit(x2b)
    p2 = model.precision_
    m2 = model.location_
    ms = np.stack([m1, m2])
    ps = np.stack([p1, p2])
    return ms, ps
|
Instant Gratification
|
4,239,340 |
for col in [ 'Sex', 'Pclass', 'XWho', 'Embarked', 'LTick', 'XCabin', 'TName' ]:
dummy = pd.get_dummies(big[col],prefix=str(col),prefix_sep="__")
big = pd.concat([big, dummy], axis=1)
big.drop(col, inplace=True, axis=1)
for col in [ 'XFam' ]:
lbl = LabelEncoder()
lbl.fit(list(big[col].values))
big[col] = lbl.transform(list(big[col].values))<filter>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm(range(512)):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
ms, ps = get_mean_cov(train3,train2['target'].values)
skf = StratifiedKFold(n_splits=15, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3, train2['target']):
P = train3[train_index,:]
T = train2.loc[train_index]['target'].values
        gm = GaussianMixture(n_components=2, init_params='kmeans', covariance_type='full',
                             tol=0.1, reg_covar=0.1, max_iter=150, n_init=5,
                             means_init=ms, precisions_init=ps)
        gm.fit(np.concatenate([P, test3], axis=0))
oof[idx1[test_index]] = gm.predict_proba(train3[test_index,:])[:,0]
preds[idx2] += gm.predict_proba(test3)[:,0] / skf.n_splits
auc = roc_auc_score(train['target'],oof)
print('QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,239,340 |
<feature_engineering><EOS>
|
sub = pd.read_csv('../input/instant-gratification/sample_submission.csv')
sub['target'] = preds
sub.to_csv('submission.csv', index=False)
|
Instant Gratification
|
4,324,396 |
<feature_engineering><EOS>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
print(train.shape, test.shape)
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
for itr in range(4):
test['target'] = preds
test.loc[test['target'] > 0.955, 'target'] = 1
test.loc[test['target'] < 0.045, 'target'] = 0
usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
    new_train = pd.concat([train, usefull_test]).reset_index(drop=True)
print(usefull_test.shape[0], "Test Records added for iteration : ", itr)
new_train.loc[oof > 0.995, 'target'] = 1
new_train.loc[oof < 0.005, 'target'] = 0
oof2 = np.zeros(len(train))
preds = np.zeros(len(test))
    for i in tqdm_notebook(range(512)):
train2 = new_train[new_train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train[train['wheezy-copper-turtle-magic']==i].index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
        skf = StratifiedKFold(n_splits=11, random_state=int(time.time()))
for train_index, test_index in skf.split(train2, train2['target']):
oof_test_index = [t for t in test_index if t < len(idx1)]
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
            if len(oof_test_index) > 0:
oof2[idx1[oof_test_index]] = clf.predict_proba(train3[oof_test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof2)
print(f'AUC: {auc:.5}')
sub1 = pd.read_csv('../input/sample_submission.csv')
sub1['target'] = preds
|
Instant Gratification
|
4,268,684 |
<SOS> metric: AUC Kaggle data source: instant-gratification<feature_engineering>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
|
Instant Gratification
|
4,268,684 |
train['Fare'] = np.log1p((train['Fare'] - train['Fare'].mean()) / train['Fare'].std())
test['Fare'] = np.log1p((test['Fare'] - test['Fare'].mean()) / test['Fare'].std())<feature_engineering>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
    data2 = VarianceThreshold(threshold=2).fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
|
Instant Gratification
|
4,268,684 |
train['c_mean'] = pd.Series(train.mean(axis=1), index=train.index)
c_mean_max = train['c_mean'].max()
c_mean_min = train['c_mean'].min()
c_mean_scaled = (train.c_mean - c_mean_min) / c_mean_max
train['c_mean_s'] = pd.Series(c_mean_scaled, index=train.index)
del train['c_mean']
train['c_std'] = pd.Series(train.std(axis=1), index=train.index)
c_std_max = train['c_std'].max()
c_std_min = train['c_std'].min()
c_std_scaled = (train.c_std - c_std_min) / c_std_max
train['c_std_s'] = np.log1p(pd.Series(c_std_scaled, index=train.index))
del train['c_std']
test['c_mean'] = pd.Series(test.mean(axis=1), index=test.index)
c_mean_max = test['c_mean'].max()
c_mean_min = test['c_mean'].min()
c_mean_scaled = (test.c_mean - c_mean_min) / c_mean_max
test['c_mean_s'] = np.log1p(pd.Series(c_mean_scaled, index=test.index))
del test['c_mean']
test['c_std'] = pd.Series(test.std(axis=1), index=test.index)
c_std_max = test['c_std'].max()
c_std_min = test['c_std'].min()
c_std_scaled = (test.c_std - c_std_min) / c_std_max
test['c_std_s'] = pd.Series(c_std_scaled, index=test.index)
del test['c_std']
print(train.shape, test.shape)<count_duplicates>
|
train.loc[oof > 0.99, 'target'] = 1
train.loc[oof < 0.01, 'target'] = 0
|
Instant Gratification
|
4,268,684 |
print(train.shape)
train.drop_duplicates(inplace=True)
print(train.shape)<data_type_conversions>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
    sel = VarianceThreshold(threshold=1.5).fit(data[cols])
data2 = sel.transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3, train2['target']):
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof)
print('QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,268,684 |
print(" <*> Debut")
kDate = time.strftime('%d%m%y_%H%M%S',time.localtime())
start = time.time()
y_train = train['Survived'].values.astype(np.float64)
x_train = train.drop(['PassengerId', 'Survived'], axis=1).values.astype(np.float64)
x_test = test.drop(['PassengerId'], axis=1).values.astype(np.float64)
print('Shape train: {}\nShape test: {}\nShape Y: {}'.format(x_train.shape, x_test.shape, y_train.shape))
NSplit = 5
SliceTrain = 0.75
SliceTest = 0.25
models = []
NIter = 0
TScore = 0
print("Entrainement")
rs = StratifiedShuffleSplit(n_splits=NSplit, random_state=99, test_size=SliceTest)
for train_index, test_index in rs.split(x_train, y_train):
X_train = x_train[train_index]
Y_train = y_train[train_index]
X_valid = x_train[test_index]
Y_valid = y_train[test_index]
rfc_params = {}
rfc_params['n_estimators'] = 200
rfc_params['learning_rate'] = 0.015
rfc_params['max_depth'] = 250
rfc_params['max_features'] = "auto"
rfc_params['min_samples_split'] = 0.7
rfc_params['min_samples_leaf'] = 0.01
rfc_params['random_state'] = 0
rfc_params['verbose'] = 0
sum_score = 0
score = 0
clf = GradientBoostingClassifier(**rfc_params)
clf.fit(X_train, Y_train)
models.append(clf)
score = clf.score(X_valid, Y_valid)
print(" <*> Entrainement ",NIter," avec ", SliceTrain, " pour train et ",SliceTest," pour test - Score : ", score)
TScore += score
NIter += 1
TScore /= NSplit
print(" <*> ---------------- Resultats CV ------------------ ")
print(" <*> params : ",rfc_params)
print(" <*> Score Moyenne training : ", TScore)
print("Verification avec le train")
score = 0
SCLOG = 0
NIter = 0
for clf in models:
PTrain = clf.predict(x_train)
score = clf.score(x_train, y_train)
SCLOG += score
NIter += 1
SCLOG /= NSplit
print(" <*> Score Moyenne Train : ", SCLOG)
print("Predictions")
NIter = 0
ctb_pred1 = []
for clf in models:
PTest = clf.predict(x_test)
ctb_pred1.append(PTest)
NIter += 1
PTest = [0] * len(ctb_pred1[0])
for i in range(NSplit):
PTest += ctb_pred1[i]
PTest /= NSplit
print(pd.DataFrame(PTest).head())
end = time.time()
print(" <*> Duree : ",end - start)
print(" <*> Fin" )<save_to_csv>
|
cat_dict = dict()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
cat_dict[i] = train3.shape[1]
|
Instant Gratification
|
4,268,684 |
print(" Mise a jour des colonnes submit")
submit['Survived'] = np.clip(PTest, 0, 1 ).astype(int)
localtime = time.localtime(time.time())
WDate = str(localtime.tm_mday ).rjust(2, '0')+str(localtime.tm_mon ).rjust(2, '0')+str(localtime.tm_year)
SUBFIC = SUBINT_DIR+"Titanic_GBR_"+str(kDate)+".csv"
print(" <*> Ecriture deb CSV/7z : ", SUBFIC)
submit.to_csv(SUBFIC, index=False)
print(" <*> Ecriture fin CSV/7z : ", SUBFIC )<set_options>
|
test['target'] = preds
oof_var = np.zeros(len(train))
preds_var = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
    train2p = train2.copy(); idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
    test2p = test2[(test2['target'] <= 0.01) | (test2['target'] >= 0.99)].copy()
    test2p.loc[test2p['target'] >= 0.5, 'target'] = 1
    test2p.loc[test2p['target'] < 0.5, 'target'] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
    pca = PCA(n_components=cat_dict[k], random_state=1234)
pca.fit(train2p[cols])
train3p = pca.transform(train2p[cols])
train3 = pca.transform(train2[cols])
test3 = pca.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
        test_index3 = test_index[test_index < len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,268,684 |
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def readCsv(file):
    with open(file, encoding='utf8', mode='r') as f:
return pd.read_csv(f)
def seeNorm(series):
sns.distplot(series)
    print('skewness coefficient', series.skew())
    print('kurtosis coefficient', series.kurt())
def drawHeatMap(dataset, target, k=-1):
corrmat = dataset.corr()
cols = dataset.columns
if -1 != k:
cols = corrmat.nlargest(k, target)[target].index
cm = np.corrcoef(dataset[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot = True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values,
xticklabels=cols.values, cmap='coolwarm')
plt.show()
return cols
def drawPairplot(dataset, cols):
sns.set()
sns.pairplot(dataset[cols], size=2.5)
plt.show()
def seeMissing(dataset):
total = dataset.isnull().sum()
    percent = (total / dataset.isnull().count()) * 100
total = total.drop(total[total == 0].index)
percent = percent.drop(percent[percent == 0].index)
total = total.sort_values(ascending=False)
percent = percent.sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Missing Ratio'])
print(missing_data.head(20))
f, ax = plt.subplots(figsize=(15,12))
plt.xticks(rotation='90')
if percent.count() != 0:
sns.barplot(x=percent.index, y =percent)
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percent of missing values', fontsize=15)
plt.title('Percent missing data by feature', fontsize=15)
def drawScatter(dataset, colx, coly):
fig, ax = plt.subplots()
ax.scatter(x=dataset[colx], y = dataset[coly])
plt.xlabel(colx, fontsize=13)
plt.ylabel(coly, fontsize=13)
plt.show()
def drawDist(dataset, col, title=''):
print('skewness', dataset[col].skew())
print('kurt', dataset[col].kurt())
sns.distplot(dataset[col], fit = norm)
(mu, sigma)= norm.fit(dataset[col])
print('mu = {:.2f} and sigma = {:.2f}'.format(mu, sigma))
    plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f})'.format(mu, sigma)], loc='best')
plt.ylabel('Frequency')
plt.title('{} distribution'.format(title))
plt.show()
def drawQQ(dataset, col):
fig = plt.figure()
res = stats.probplot(dataset[col], plot=plt)
plt.show()
def drawBox(dataset, xcol, ycol):
plt.figure(figsize=(18, 8))
sns.boxplot(x=dataset[xcol], y=dataset[ycol])
def delCol(dataset, col):
dataset.drop(columns=col, axis=1, inplace=True)
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, models):
self.models = models
def fit(self, X, y):
self.models_ = [clone(x)for x in self.models]
for model in self.models_:
model.fit(X, y)
return self
def predict(self, X):
predictions = np.column_stack([
model.predict(X)for model in self.models_
])
return np.mean(predictions, axis=1)
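A minimal usage sketch for AveragingModels, with hypothetical sklearn regressors (not from this kernel):
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
X = np.random.rand(50, 4); y = np.random.rand(50)
avg = AveragingModels(models=(Ridge(alpha=1.0), DecisionTreeRegressor(max_depth=3)))
avg.fit(X, y)
print(avg.predict(X[:3]).shape)  # (3,) -- the two models' predictions are averaged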
class DataMining() :
def __init__(self):
dirname = '/kaggle/input/titanic'
self.train = readCsv('{}/train.csv'.format(dirname))
self.test = readCsv('{}/test.csv'.format(dirname))
self.idName = 'PassengerId'
self.targetName = 'Survived'
self.trainX = self.train.drop([self.idName, self.targetName], axis = 1)
self.trainY = self.train[self.targetName]
self.testId = self.test[self.idName]
self.testX = self.test.drop(self.idName, axis = 1)
def preprocessing(self):
self.fillMissing()
self.mergeDataset()
self.dataset = self.dataset.fillna(np.nan)
self.dataset.drop(['Name'], axis=1, inplace=True)
print(self.dataset.head())
self.labelEncoding(['Sex', 'Ticket', 'Cabin', 'Embarked'])
self.splitDataset()
def mergeDataset(self):
self.train_len = self.trainX.shape[0]
self.dataset = pd.concat([self.trainX, self.testX], axis=0 ).reset_index(drop=True)
def splitDataset(self):
self.trainX = self.dataset[:self.train_len]
self.testX = self.dataset[self.train_len :]
self.trainY = self.trainY.astype(int)
def fillMedian(self, col, kind='train'):
if kind != 'train':
self.testX[col] = self.testX[col].fillna(self.testX[col].median())
else:
self.trainX[col] = self.trainX[col].fillna(self.trainX[col].median())
def fillMode(self, col, kind='train'):
if kind != 'train':
self.testX[col] = self.testX[col].fillna(self.testX[col].mode() [0])
else:
self.trainX[col] = self.trainX[col].fillna(self.trainX[col].mode() [0])
def fillMissing(self):
self.fillMedian('Age')
self.fillMode('Cabin')
self.fillMode('Embarked')
self.fillMode('Cabin', 'test')
self.fillMode('Fare', 'test')
self.fillMode('Age', 'test')
def labelEncoding(self, col):
enc = LabelEncoder()
self.dataset[col] = self.dataset[col].apply(enc.fit_transform)
def clearOutlier(self):
feats = self.train.dtypes[self.train.dtypes != 'object'].index
print(feats)
for f in feats:
Q1 = self.train[f].quantile (.25)
Q3 = self.train[f].quantile (.75)
IQR = Q3 - Q1
threshold = 1.5 * IQR
self.train[(self.train[f] < Q1 - threshold)|(self.train[f] > Q3 + threshold)] = np.nan
def exploring(self):
self.preprocessing()
        print('\ntrain missing')
        print(self.trainX.isnull().sum())
        print('\ntest missing')
        print(self.testX.isnull().sum())
print(self.dataset.head())
tmp = pd.concat([self.trainX, self.trainY], axis=1 ).reset_index(drop=True)
drawHeatMap(tmp, self.targetName, -1)
def score(self, model, name=''):
n_folds = 9
kf = KFold(n_splits= n_folds, random_state=1, shuffle=True)
score = cross_val_score(model, self.trainX.values, self.trainY, cv=kf)
print("{}score: {:.2f}%".format(name, score.mean() *100.0))
def predict(self):
self.ensemble()
self.wrapAns()
def ensemble(self):
all_ans = []
for i in range(len(self.models)) :
self.models[i].fit(self.trainX, self.trainY)
all_ans.append(self.models[i].predict(self.testX))
self.testY = all_ans[1]
def wrapAns(self):
submission = pd.DataFrame()
submission[self.idName] = self.testId
submission[self.targetName] = self.testY.astype('int')
save_dir = '/kaggle/working'
submission.to_csv('{}/submission.csv'.format(save_dir), index=False)
print('the result has been written into the file "submission.csv"')
def modeling(self):
lr = LogisticRegression(random_state=1)
rf = RandomForestClassifier(random_state=1, n_estimators=200, criterion = 'entropy', min_samples_leaf=1, min_samples_split=4)
gboost = GradientBoostingClassifier(learning_rate=0.2,max_depth = 4,n_estimators =100)
self.models = [
rf,
gboost
]
def evaluate(self):
for model in self.models:
self.score(model)
dm = DataMining()
dm.preprocessing()
dm.modeling()
dm.predict()
<set_options>
|
test['target'] = preds_var
oof_var2 = np.zeros(len(train))
preds_var2 = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var2[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var2)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,268,684 |
%matplotlib inline<define_variables>
|
auc = roc_auc_score(train['target'],0.5*(oof_var+ oof_var2))
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,268,684 |
<load_pretrained><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = 0.5* preds_var + 0.5*preds_var2
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,304,570 |
<SOS> metric: AUC Kaggle data source: instant-gratification<load_from_csv>
|
warnings.filterwarnings('ignore' )
|
Instant Gratification
|
4,304,570 |
train_csv = pd.read_csv(extracted_files_path + '/training.csv')
test_csv = pd.read_csv(extracted_files_path + '/test.csv')
looktable_csv = pd.read_csv(Id_table_path )<drop_column>
|
train1 = pd.read_csv('../input/train.csv')
test1 = pd.read_csv('../input/test.csv')
cols1 = [c for c in train1.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
|
Instant Gratification
|
4,304,570 |
feature_8 = ['left_eye_center_x', 'left_eye_center_y',
'right_eye_center_x','right_eye_center_y',
'nose_tip_x', 'nose_tip_y',
'mouth_center_bottom_lip_x',
'mouth_center_bottom_lip_y', 'Image']
train_8_csv = train_csv[feature_8].dropna().reset_index()
train_30_csv = train_csv.dropna().reset_index()<categorify>
|
def instant_model(train, test, cols = cols1, clf = QuadraticDiscriminantAnalysis(0.5), selection = "PCA"):
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic'] == i]
test2 = test[test['wheezy-copper-turtle-magic'] == i]
idx1 = train2.index
idx2 = test2.index
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
if selection == "variance":
data2 = StandardScaler().fit_transform(VarianceThreshold(threshold=2 ).fit_transform(data[cols]))
train3 = pd.DataFrame(data2[:train2.shape[0]], index = idx1)
test3 = pd.DataFrame(data2[train2.shape[0]:], index = idx2)
elif selection == "PCA":
pca = PCA(n_components = 40, random_state= 1234)
pca.fit(data[:train2.shape[0]])
train3 = pd.DataFrame(pca.transform(data[:train2.shape[0]]), index = idx1)
test3 = pd.DataFrame(pca.transform(data[train2.shape[0]:]), index = idx2)
train3['target'] = train2['target']
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train3, train3['target']):
X_train = train3.iloc[train_index, :].drop(["target"], axis = 1)
X_test = train3.iloc[test_index, :].drop(["target"], axis = 1)
y_train = train3.iloc[train_index, :]['target']
y_test = train3.iloc[test_index, :]['target']
clf.fit(X_train, y_train)
train_prob = clf.predict_proba(X_train)[:,1]
test_prob = clf.predict_proba(X_test)[:,1]
oof[idx1[test_index]] = test_prob
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
return oof, preds
|
Instant Gratification
|
4,304,570 |
def str_to_array(pd_series):
data_size = len(pd_series)
X = np.zeros(shape=(data_size,96,96,1), dtype=np.float32)
for i in tqdm(range(data_size)) :
img_str = pd_series[i]
img_list = img_str.split(' ')
img_array = np.array(img_list, dtype=np.float32)
img_array = img_array.reshape(96,96,1)
X[i] = img_array
return X<save_to_csv>
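A toy run of str_to_array on one synthetic 96x96 image string (not real competition data):
import pandas as pd
fake = pd.Series([' '.join(['0'] * 96 * 96)])
print(str_to_array(fake).shape)  # (1, 96, 96, 1)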
|
def get_newtrain(train, test, preds, oof):
    test['target'] = preds
    test.loc[test['target'] > 0.985, 'target'] = 1
    test.loc[test['target'] < 0.015, 'target'] = 0
    usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
    # relabel confident train rows before the concat: `oof` only covers the original
    # train rows, so the boolean mask would not align with the longer combined frame
    train = train.copy()
    train.loc[oof > 0.985, 'target'] = 1
    train.loc[oof < 0.015, 'target'] = 0
    new_train = pd.concat([train, usefull_test]).reset_index(drop=True)
    return new_train
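A small sanity sketch for get_newtrain on synthetic frames (assumed columns; same thresholds as above):
import numpy as np, pandas as pd
tr = pd.DataFrame({'target': [0, 1]})
te = pd.DataFrame({'f': [0.1, 0.2, 0.3]})
nt = get_newtrain(tr, te, preds=np.array([0.999, 0.5, 0.001]), oof=np.array([0.2, 0.99]))
print(len(nt))  # 4 -- the two confident test rows (0.999 and 0.001) are appended with hard labels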
|
Instant Gratification
|
4,304,570 |
X_train_30 = str_to_array(train_30_csv['Image'])
labels_30 = train_30_csv.drop(['index','Image'], axis=1)
y_train_30 = labels_30.to_numpy(dtype=np.float32)
print('X_train with 30 feature shape: ', X_train_30.shape)
print('y_train with 30 feature shape: ', y_train_30.shape )<save_to_csv>
|
oof_temp, preds_temp = instant_model(train1, test1, selection = 'variance')
newtrain1 = get_newtrain(train1, test1, preds_temp, oof_temp )
|
Instant Gratification
|
4,304,570 |
X_train_8 = str_to_array(train_8_csv['Image'])
labels_8 = train_8_csv.drop(['index','Image'], axis=1)
y_train_8 = labels_8.to_numpy(dtype=np.float32)
print('X_train with 8 feature shape: ', X_train_8.shape)
print('y_train with 8 feature shape: ', y_train_8.shape )<choose_model_class>
|
oof_qda_var, preds_qda_var = instant_model(newtrain1, test1, selection = 'variance')
oof_knn_var, preds_knn_var = instant_model(newtrain1, test1, \
clf = KNeighborsClassifier(n_neighbors = 7, p = 2, weights = 'distance'),\
selection = 'variance' )
|
Instant Gratification
|
4,304,570 |
def create_model(output_n = 30):
model = keras.models.Sequential([
keras.layers.InputLayer(input_shape=[96,96,1]),
keras.layers.Conv2D(filters=32, kernel_size=[5,5], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=32, kernel_size=[5,5], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=64, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=64, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=128, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=128, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=256, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=256, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=512, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=512, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Flatten() ,
keras.layers.Dense(units=512, activation='relu'),
keras.layers.Dropout (.1),
keras.layers.Dense(units=output_n),
])
model.compile(optimizer = 'adam' , loss = "mean_squared_error", metrics=["mae"])
return model<choose_model_class>
|
oof_qda_pca, preds_qda_pca = instant_model(newtrain1, test1)
oof_knn_pca, preds_knn_pca = instant_model(newtrain1, test1, \
clf = KNeighborsClassifier(n_neighbors = 7, p = 2, weights = 'distance'))
|
Instant Gratification
|
4,304,570 |
model_30 = create_model(output_n=30)
model_8 = create_model(output_n=8 )<choose_model_class>
|
logit = LogisticRegression()
newX_train_stack = pd.DataFrame({"QDA_var": oof_qda_var, "QDA_pca": oof_qda_pca, \
"KNN_var": oof_knn_var, "KNN_pca": oof_knn_pca})
newX_test_stack = pd.DataFrame({"QDA_var": preds_qda_var, "QDA_pca": preds_qda_pca, \
"KNN_var": preds_knn_var, "KNN_pca": preds_knn_pca})
newy_stack = newtrain1['target']
logit.fit(newX_train_stack, newy_stack)
pred_stack_train = logit.predict_proba(newX_train_stack)[:,1]
pred_stack_test = logit.predict_proba(newX_test_stack)[:,1]
print("ROC_AUC: {0}".format(roc_auc_score(newy_stack, pred_stack_train)))
stack_result = pred_stack_test  # reuse the probabilities computed above instead of predicting again
|
Instant Gratification
|
4,304,570 |
<train_model><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = stack_result
sub.to_csv('submission_4stack.csv',index=False )
|
Instant Gratification
|
4,270,029 |
<SOS> metric: AUC Kaggle data source: instant-gratification<train_model>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
|
Instant Gratification
|
4,270,029 |
history = model_8.fit(X_train_8, y_train_8, validation_split=.1, batch_size=64, epochs=100, callbacks=[LR_callback,EarlyStop_callback] )<train_model>
|
def get_mean_cov(x,y):
model = GraphicalLasso()
ones =(y==1 ).astype(bool)
x2 = x[ones]
model.fit(x2)
p1 = model.precision_
m1 = model.location_
onesb =(y==0 ).astype(bool)
x2b = x[onesb]
model.fit(x2b)
p2 = model.precision_
m2 = model.location_
ms = np.stack([m1,m2])
ps = np.stack([p1,p2])
return ms,ps
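A shape sketch for get_mean_cov feeding GaussianMixture(n_components=2), on synthetic data:
import numpy as np
X = np.random.randn(300, 4)
y = (np.random.rand(300) > 0.5).astype(int)
ms, ps = get_mean_cov(X, y)
print(ms.shape, ps.shape)  # (2, 4) and (2, 4, 4): per-class means and precision matrices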
|
Instant Gratification
|
4,270,029 |
X_test = str_to_array(test_csv['Image'])
print('X_test shape: ', X_test.shape )<predict_on_test>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3, train2['target']):
ms, ps = get_mean_cov(train3[train_index,:],train2.loc[train_index]['target'].values)
gm = GaussianMixture(n_components=2, init_params='random', covariance_type='full', tol=0.001,reg_covar=0.001, max_iter=100, n_init=1,means_init=ms, precisions_init=ps)
gm.fit(np.concatenate([train3,test3],axis = 0))
oof[idx1[test_index]] = gm.predict_proba(train3[test_index,:])[:,0]
preds[idx2] += gm.predict_proba(test3)[:,0] / skf.n_splits
auc = roc_auc_score(train['target'],oof)
print('QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,270,029 |
y_hat_30 = model_30.predict(X_test)
y_hat_8 = model_8.predict(X_test)
print('Predictions shape', y_hat_30.shape)
print('Predictions shape', y_hat_8.shape )<feature_engineering>
|
cat_dict = dict()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
cat_dict[i] = train3.shape[1]
|
Instant Gratification
|
4,270,029 |
feature_8_ind = [0, 1, 2, 3, 20, 21, 28, 29]
for i in range(8):
print('Copy "{}" feature column from y_hat_8 --> y_hat_30'.format(feature_8[i]))
y_hat_30[:,feature_8_ind[i]] = y_hat_8[:,i]<define_variables>
|
test['target'] = preds
oof_var = np.zeros(len(train))
preds_var = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
pca = PCA(n_components=cat_dict[k], random_state= 1234)
pca.fit(train2p[cols])
train3p = pca.transform(train2p[cols])
train3 = pca.transform(train2[cols])
test3 = pca.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,270,029 |
required_features = list(looktable_csv['FeatureName'])
imageID = list(looktable_csv['ImageId']-1)
feature_to_num = dict(zip(required_features[0:30], range(30)) )<define_variables>
|
test['target'] = preds_var
oof_var2 = np.zeros(len(train))
preds_var2 = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var2[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var2)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,270,029 |
feature_ind = []
for f in required_features:
feature_ind.append(feature_to_num[f] )<define_variables>
|
auc = roc_auc_score(train['target'],0.5*(oof_var+ oof_var2))
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,270,029 |
<save_to_csv><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = 0.5* preds_var + 0.5*preds_var2
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,244,813 |
<SOS> metric: AUC Kaggle data source: instant-gratification<install_modules>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic' )
|
Instant Gratification
|
4,244,813 |
!wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py
!pip install keras
!pip install tensorflow-hub
!pip install tensorflow<import_modules>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
data2 = VarianceThreshold(threshold=2 ).fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}' )
|
Instant Gratification
|
4,244,813 |
import tokenization
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.layers import Dense, Input, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model<set_options>
|
train.loc[oof > 0.99, 'target'] = 1
train.loc[oof < 0.01, 'target'] = 0
|
Instant Gratification
|
4,244,813 |
plt.style.use('fivethirtyeight')
warnings.filterwarnings("ignore" )<load_from_csv>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
sel = VarianceThreshold(threshold=1.5 ).fit(data[cols])
data2 = sel.transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3, train2['target']):
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof)
print('QDA scores CV =',round(auc,5))
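A quick sketch of what VarianceThreshold(1.5) does in these loops, on synthetic columns (illustrative only):
import numpy as np
from sklearn.feature_selection import VarianceThreshold
X = np.hstack([np.random.randn(1000, 2) * 3, np.random.randn(1000, 3)])  # 2 high-variance + 3 unit-variance cols
print(VarianceThreshold(threshold=1.5).fit_transform(X).shape)  # typically (1000, 2)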
|
Instant Gratification
|
4,244,813 |
dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test = pd.read_csv('../input/nlp-getting-started/test.csv')
submission = pd.read_csv('../input/nlp-getting-started/sample_submission.csv')<choose_model_class>
|
cat_dict = dict()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
cat_dict[i] = train3.shape[1]
|
Instant Gratification
|
4,244,813 |
bert_layer = \
hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2",
trainable=True )<define_variables>
|
test['target'] = preds
oof_var = np.zeros(len(train))
preds_var = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
pca = PCA(n_components=cat_dict[k], random_state= 1234)
pca.fit(train2p[cols])
train3p = pca.transform(train2p[cols])
train3 = pca.transform(train2[cols])
test3 = pca.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,244,813 |
max_len = max([len(x.split()) for x in dataset.text]) + 1<categorify>
|
test['target'] = preds_var
oof_var2 = np.zeros(len(train))
preds_var2 = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var2[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var2)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,244,813 |
def bert_encode(texts, tokenizer, max_len):
    # max_len is required here: the previous default of None would crash on `text[:max_len-2]`
    all_tokens = []
    all_masks = []
    all_segments = []
    for text in texts:
        text = tokenizer.tokenize(text)
        text = text[:max_len-2]
        input_sequence = ["[CLS]"] + text + ["[SEP]"]
        pad_len = max_len - len(input_sequence)
        tokens = tokenizer.convert_tokens_to_ids(input_sequence)
        tokens += [0] * pad_len
        pad_masks = [1] * len(input_sequence) + [0] * pad_len
        segment_ids = [0] * max_len
        all_tokens.append(tokens)
        all_masks.append(pad_masks)
        all_segments.append(segment_ids)
    return np.array(all_tokens), np.array(all_masks), np.array(all_segments)<data_type_conversions>
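A toy illustration of the three BERT inputs, using a hypothetical minimal tokenizer (not the real FullTokenizer):
import numpy as np
class ToyTok:
    vocab = {'[CLS]': 101, '[SEP]': 102, 'hello': 1, 'world': 2}
    def tokenize(self, text): return text.split()
    def convert_tokens_to_ids(self, toks): return [self.vocab.get(t, 0) for t in toks]
tokens, masks, segs = bert_encode(['hello world'], ToyTok(), max_len=6)
print(tokens)  # [[101   1   2 102   0   0]]
print(masks)   # [[1 1 1 1 0 0]] -- 1 over real tokens, 0 over padding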
|
auc = roc_auc_score(train['target'],0.5*(oof_var+ oof_var2))
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,244,813 |
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()<choose_model_class>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = 0.5* preds_var + 0.5*preds_var2
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,208,612 |
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case )<categorify>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
sel = VarianceThreshold(threshold=1.5 ).fit(data[cols])
data2 = sel.transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3, train2['target']):
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof)
print('QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,208,612 |
train_input = bert_encode(dataset.text.values, tokenizer, max_len=max_len)
train_labels = dataset.target.values
test_input = bert_encode(test.text, tokenizer, max_len=max_len )<choose_model_class>
|
cat_dict = dict()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
cat_dict[i] = train3.shape[1]
|
Instant Gratification
|
4,208,612 |
all_inputs = [
Input(shape=(max_len,), dtype=tf.int32),
Input(shape=(max_len,), dtype=tf.int32),
Input(shape=(max_len,), dtype=tf.int32)
]
__, sequence_output = bert_layer(all_inputs)
x = sequence_output[:, 0, :]
x = Dropout(0.5 )(x)
x = Dense(units=32, activation='relu' )(x)
x = Dense(1, activation='sigmoid' )(x)
model = Model(all_inputs, outputs=x)
model.compile(Adam(lr= 0.00001),
loss='binary_crossentropy',
metrics=['accuracy'])
model.summary()<train_model>
|
test['target'] = preds
oof_var = np.zeros(len(train))
preds_var = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
pca = PCA(n_components=cat_dict[k], random_state= 1234)
pca.fit(train2p[cols])
train3p = pca.transform(train2p[cols])
train3 = pca.transform(train2[cols])
test3 = pca.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,208,612 |
checkpoint = \
ModelCheckpoint('model.h5',
monitor='val_loss',
save_best_only=True,
verbose=1)
train_history = \
model.fit(train_input,
train_labels,
validation_split=0.2,
epochs=5,
callbacks=[checkpoint],
batch_size=16 )<load_pretrained>
|
test['target'] = preds_var
oof_var2 = np.zeros(len(train))
preds_var2 = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_var2[idx1[test_index3]] += clf.predict_proba(train3[test_index3,:])[:,1]
preds_var2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'],oof_var2)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,208,612 |
model.load_weights('model.h5' )<save_to_csv>
|
auc = roc_auc_score(train['target'],0.5*(oof_var+ oof_var2))
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,208,612 |
<set_options><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = 0.5* preds_var + 0.5*preds_var2
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,121,252 |
<SOS> metric: AUC Kaggle data source: instant-gratification<load_from_csv>
|
import numpy as np, pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.feature_selection import VarianceThreshold
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
|
Instant Gratification
|
4,121,252 |
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv' )<count_missing_values>
|
print('Loading Train')
train = pd.read_csv('../input/train.csv')
print('Loading Test')
test = pd.read_csv('../input/test.csv')
print('Finish')
|
Instant Gratification
|
4,121,252 |
train.isnull().sum()<count_missing_values>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
oof_QDA = np.zeros(len(train))
preds_QDA = np.zeros(len(test))
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
poly = PolynomialFeatures(degree=2)
sc = StandardScaler()
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
data2 = sc.fit_transform(poly.fit_transform(VarianceThreshold(threshold=1.5 ).fit_transform(data[cols])))
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
data2 = VarianceThreshold(threshold=1.5 ).fit_transform(data[cols])
train4 = data2[:train2.shape[0]]; test4 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = LogisticRegression(solver='saga',penalty='l2',C=0.01,tol=0.001)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf_QDA = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf_QDA.fit(train4[train_index,:],train2.loc[train_index]['target'])
oof_QDA[idx1[test_index]] = clf_QDA.predict_proba(train4[test_index,:])[:,1]
preds_QDA[idx2] += clf_QDA.predict_proba(test4)[:,1] / skf.n_splits
if i%64==0:
print(i, 'LR oof auc : ', round(roc_auc_score(train['target'][idx1], oof[idx1]), 5))
print(i, 'QDA oof auc : ', round(roc_auc_score(train['target'][idx1], oof_QDA[idx1]), 5))
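A dimensionality check for the degree-2 PolynomialFeatures used above (toy d=3 input):
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
print(PolynomialFeatures(degree=2).fit_transform(np.zeros((1, 3))).shape)  # (1, 10): 1 + d + d*(d+1)/2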
|
Instant Gratification
|
4,121,252 |
train.isnull().sum()<string_transform>
|
test['target'] = preds
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
poly = PolynomialFeatures(degree=2 ).fit(train3p)
train3p = poly.transform(train3p)
train3 = poly.transform(train3)
test3 = poly.transform(test3)
sc2 = StandardScaler()
train3p = sc2.fit_transform(train3p)
train3 = sc2.transform(train3)
test3 = sc2.transform(test3)
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = LogisticRegression(solver='saga',penalty='l2',C=0.01,tol=0.001)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
if k%64==0:
print(k, 'LR2 oof auc : ', round(roc_auc_score(train['target'][idx1], oof[idx1]), 5))
|
Instant Gratification
|
4,121,252 |
import string
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
nlp = spacy.load('en_core_web_sm')  # assumed model: the snippet uses `nlp` without defining it
stopwords = list(STOP_WORDS)
punct = string.punctuation
def text_data_cleaning(sentence):
    doc = nlp(sentence)
    tokens = []
    for token in doc:
        if token.lemma_ != "-PRON-":
            temp = token.lemma_.lower().strip()
        else:
            temp = token.lower_
        tokens.append(temp)
    cleaned_tokens = []
    for token in tokens:
        if token not in stopwords and token not in punct:
            cleaned_tokens.append(token)
    return " ".join(cleaned_tokens)<feature_engineering>
|
test['target'] = preds_QDA
oof_QDA2 = np.zeros(len(train))
preds_QDA2 = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf_QDA2 = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf_QDA2.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_QDA2[idx1[test_index3]] = clf_QDA2.predict_proba(train3[test_index3,:])[:,1]
preds_QDA2[test2.index] += clf_QDA2.predict_proba(test3)[:,1] / skf.n_splits
if k%64==0:
print(k, 'QDA2 oof auc : ', round(roc_auc_score(train['target'][idx1], oof_QDA2[idx1]), 5))
|
Instant Gratification
|
4,121,252 |
train['text'] = train.text.apply(lambda x: text_data_cleaning(x))
<feature_engineering>
|
print('LR auc: ', round(roc_auc_score(train['target'], oof),5))
print('QDA auc: ', round(roc_auc_score(train['target'], oof_QDA2),5))
|
Instant Gratification
|
4,121,252 |
train.keyword = train.keyword.fillna("")
train['new_text'] = train.text
test.keyword = test.keyword.fillna("")
test['new_text'] = test.text  # mirror the train column; the original `test['text'] = test.text` was a no-op
test['text'] = test.text.apply(lambda x: text_data_cleaning(x))<split>
|
w_best = 0
oof_best = oof_QDA2
for w in np.arange(0,0.55,0.001):
oof_blend = w*oof+(1-w)*oof_QDA2
if(roc_auc_score(train['target'], oof_blend)) >(roc_auc_score(train['target'], oof_best)) :
w_best = w
oof_best = oof_blend
print(w_best)
print('best weight: ', w_best)
print('auc_best: ', round(roc_auc_score(train['target'], oof_best), 5))
|
Instant Gratification
|
4,121,252 |
<choose_model_class><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = w_best*preds +(1-w_best)*preds_QDA2
sub.to_csv('submission.csv', index=False)
sub.head()
|
Instant Gratification
|
4,090,518 |
<SOS> metric: AUC Kaggle data source: instant-gratification<choose_model_class>
|
warnings.filterwarnings('ignore')
|
Instant Gratification
|
4,090,518 |
def build_model(embed):
model = Sequential([
Input(shape=[], dtype=tf.string),
embed,
Dense(1024, activation='elu'),
BatchNormalization() ,
Dropout(0.5),
Dense(512, activation='elu'),
BatchNormalization() ,
Dropout(0.35),
Dense(256, activation='relu'),
BatchNormalization() ,
Dropout(0.1),
Dense(1, activation='sigmoid')
])
model.compile(Adam(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])
return model<choose_model_class>
|
%%time
path = Path('../input')
def load_data(data):
return pd.read_csv(data)
with multiprocessing.Pool() as pool:
train, test, sub = pool.map(load_data, [path/'train.csv',
path/'test.csv',
path/'sample_submission.csv'] )
|
Instant Gratification
|
4,090,518 |
earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')
mcp_save = ModelCheckpoint('model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=2, epsilon=1e-4, mode='min' )<train_model>
|
NFOLDS=5
NTRIALS=100
RS=42
debug=0
lowest=0.01
highest=0.99
|
Instant Gratification
|
4,090,518 |
with tf.compat.v1.Session() as session:
tf.compat.v1.keras.backend.set_session(session)
session.run([tf.compat.v1.global_variables_initializer() , tf.compat.v1.tables_initializer() ])
history = model.fit(
X_train, y_train,
validation_data=(X_test,y_test),
epochs=35,
callbacks=[earlyStopping,reduce_lr_loss,mcp_save],
batch_size=128
)<predict_on_test>
|
if debug:
magic_max=2
magic_min=0
NFOLDS=2
NTRIALS=2
else:
magic_max=train['wheezy-copper-turtle-magic'].max()
magic_min=train['wheezy-copper-turtle-magic'].min()
|
Instant Gratification
|
4,090,518 |
with tf.Session() as session:
tf.compat.v1.keras.backend.set_session(session)
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
model.load_weights('model.hdf5')
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred.round().astype(int)) )<save_to_csv>
|
def preprocess(clfs=['QDA'], train=train, test=test, magic_min=magic_min, magic_max=magic_max):
prepr = {}
for i in range(magic_min, magic_max+1):
X = train[train['wheezy-copper-turtle-magic']==i].copy()
Y = X.pop('target' ).values
X_test = test[test['wheezy-copper-turtle-magic']==i].copy()
idx_train = X.index
idx_test = X_test.index
X.reset_index(drop=True,inplace=True)
cols = [c for c in X.columns if c not in ['id', 'wheezy-copper-turtle-magic']]
l=len(X)
X_all = pd.concat([X[cols], X_test[cols]], ignore_index=True)
X_vt = VarianceThreshold(threshold=1.5 ).fit_transform(X_all)
prepr['vt_' + str(i)] = X_vt
prepr['train_size_' + str(i)] = l
prepr['idx_train_' + str(i)] = idx_train
prepr['idx_test_' + str(i)] = idx_test
prepr['target_' + str(i)] = Y
return prepr
|
Instant Gratification
|
4,090,518 |
with tf.Session() as session:
tf.compat.v1.keras.backend.set_session(session)
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
model.load_weights('model.hdf5')
sub = model.predict(test_data)
subm = pd.DataFrame()
subm['id'] = test['id']
subm['target'] = sub.round().astype(int)
subm.to_csv("pred.csv", index = False )<install_modules>
|
%%time
data = preprocess()
|
Instant Gratification
|
4,090,518 |
!pip install fastai --upgrade<import_modules>
|
def get_data(i, data):
l = data['train_size_' + str(i)]
X_all = data['vt_' + str(i)]
X = X_all[:l, :]
X_test = X_all[l:, :]
Y = data['target_' + str(i)]
idx_train = data['idx_train_' + str(i)]
idx_test = data['idx_test_' + str(i)]
return X, X_test, Y, idx_train, idx_test
|
Instant Gratification
|
4,090,518 |
from fastai import *
from fastai.tabular import *<load_from_csv>
|
def pseudolabeling(X_train, X_test, Y_train, Y_pseudo,
idx_test, lowest=lowest, highest=highest, test=test):
assert len(test)== len(Y_pseudo), "The length of test does not match that of Y_pseudo!"
Y_aug = Y_pseudo[idx_test]
assert len(Y_aug)== len(X_test), "The length of Y_aug does not match that of X_test!"
Y_aug[Y_aug > highest] = 1
Y_aug[Y_aug < lowest] = 0
mask =(Y_aug == 1)|(Y_aug == 0)
Y_useful = Y_aug[mask]
X_test_useful = X_test[mask]
X_train_aug = np.vstack(( X_train, X_test_useful))
Y_train_aug = np.vstack(( Y_train.reshape(-1, 1), Y_useful.reshape(-1, 1)))
return X_train_aug, Y_train_aug
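A minimal sketch of pseudolabeling on synthetic arrays (assumed shapes, not kernel data):
import numpy as np
Xtr = np.zeros((3, 2)); ytr = np.array([0, 1, 0])
Xte = np.ones((4, 2))
Y_ps = np.array([0.999, 0.5, 0.001, 0.6])
Xa, ya = pseudolabeling(Xtr, Xte, ytr, Y_ps, idx_test=np.arange(4), lowest=0.01, highest=0.99, test=np.zeros(4))
print(Xa.shape, ya.ravel())  # (5, 2) [0. 1. 0. 1. 0.] -- only the two confident test rows were added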
|
Instant Gratification
|
4,090,518 |
input_path = '/kaggle/input/'
train_df = pd.read_csv(f'{input_path}train.csv')
test_df = pd.read_csv(f'{input_path}test.csv' )<feature_engineering>
|
def train_classifier(clf_name, clfs, data=data, train=train, test=test,
debug=debug, NFOLDS=NFOLDS, RS=RS, Y_pseudo=None,
magic_min=magic_min, magic_max=magic_max,
lowest=lowest, highest=highest, verbose=1):
auc_all = np.array([])
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in range(magic_min, magic_max+1):
X, X_test, Y, idx_train, idx_test = get_data(i=i, data=data)
folds = StratifiedKFold(n_splits=NFOLDS, random_state=RS)
auc_folds = np.array([])
for train_index, val_index in folds.split(X, Y):
X_train, Y_train = X[train_index, :], Y[train_index]
X_val, Y_val = X[val_index, :], Y[val_index]
if Y_pseudo is not None:
X_train_aug, Y_train_aug = pseudolabeling(X_train, X_test,
Y_train, Y_pseudo, idx_test,
lowest=lowest, highest=highest,
test=test)
clfs[clf_name].fit(X_train_aug, Y_train_aug)
else:
clfs[clf_name].fit(X_train, Y_train)
oof[idx_train[val_index]] = clfs[clf_name].predict_proba(X_val)[:,1]
preds[idx_test] += clfs[clf_name].predict_proba(X_test)[:,1]/NFOLDS
auc = roc_auc_score(Y_val, oof[idx_train[val_index]])
auc_folds = np.append(auc_folds, auc)
auc_all = np.append(auc_all, np.mean(auc_folds))
auc_combo = roc_auc_score(train['target'].values, oof)
auc_av = np.mean(auc_all)
std = np.std(auc_all)/(np.sqrt(NFOLDS)*np.sqrt(magic_max+1))
if verbose:
print(f'The result summary for the {clf_name} classifier:')
print(f'The combined CV score is {round(auc_combo, 5)}.')
print(f'The folds average CV score is {round(auc_av, 5)}.')
        print(f'The standard deviation is {round(std, 5)}.\n')
return preds, auc_combo
|
Instant Gratification
|
4,090,518 |
for df in [train_df, test_df]:
df['Title'] = df['Name'].str.split(',' ).str[1].str.split(' ' ).str[1]
df['Deck'] = df['Cabin'].str[0]
all_df = pd.concat([train_df, test_df], sort=False)
mean_age_by_title = all_df.groupby('Title' ).mean() ['Age']
for df in [train_df, test_df]:
for title, age in mean_age_by_title.iteritems() :
df.loc[df['Age'].isnull() &(df['Title'] == title), 'Age'] = age<correct_missing_values>
|
%%time
results = {}
results['rp']=np.array([])
results['auc']=np.array([])
np.random.seed(RS)
for j in range(NTRIALS):
rp=10**(-2*np.random.rand())
clfs_init={'QDA': QuadraticDiscriminantAnalysis(reg_param=rp)}
clfs={'QDA': QuadraticDiscriminantAnalysis(reg_param=rp)}
Y_pseudo, _ = train_classifier('QDA', clfs=clfs_init, verbose=0)
_, auc = train_classifier('QDA', clfs=clfs, Y_pseudo=Y_pseudo, verbose=0)
results['rp']=np.append(results['rp'], rp)
results['auc']=np.append(results['auc'], auc)
print(f"Trial number {j}: AUC = {round(auc, 5)}, rp={round(rp, 5)}.
" )
|
Instant Gratification
|
4,090,518 |
test_df.Fare.fillna(0,inplace=True )<split>
|
auc_max = np.max(results['auc'])
i_max = np.argmax(results['auc'])
rp_best = results['rp'][i_max]
print(f"The highest AUC achieved is {round(auc_max, 5)} for rp={round(rp_best, 5)}.")
auc_min = np.min(results['auc'])
i_min = np.argmin(results['auc'])
print(f"The lowest AUC achieved is {round(auc_min, 5)} for rp={round(results['rp'][i_min], 5)}.")
print(f"The smallest value of `reg_param` explored during the search is {round(np.min(results['rp']), 5)}.")
print(f"The largest value of `reg_param` explored during the search is {round(np.max(results['rp']), 5)}.")
|
Instant Gratification
|