kernel_id (int64, values 24.2k-23.3M) | prompt (string, lengths 8-1.85M) | completion (string, lengths 1-182k) | comp_name (string, lengths 5-57)
---|---|---|---|
4,090,518 |
dep_var = 'Survived'
cat_names = ['Pclass', 'Sex', 'Embarked', 'Title', 'Deck']
cont_names = ['Age', 'Fare', 'SibSp', 'Parch']
procs = [FillMissing, Categorify, Normalize]
test = TabularList.from_df(test_df, cat_names=cat_names, cont_names=cont_names, procs=procs)
data =(TabularList.from_df(train_df, path='.', cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(list(range(0,200)))
.label_from_df(cols=dep_var)
.add_test(test, label=0)
.databunch() )<define_variables>
|
clfs_best = {'QDA': QuadraticDiscriminantAnalysis(reg_param=rp_best)}
preds_best, auc_best = train_classifier('QDA', clfs=clfs_best, Y_pseudo=Y_pseudo, verbose=0)
print(f"AUC: {auc_best}" )
|
Instant Gratification
|
4,090,518 |
<train_model><EOS>
|
sub['target'] = preds_best
sub.to_csv('submission.csv',index=False )
|
Instant Gratification
|
4,362,670 |
<SOS> metric: AUC Kaggle data source: instant-gratification<train_model>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
|
Instant Gratification
|
4,362,670 |
learn.fit(1, 1e-3 )<predict_on_test>
|
def get_mean_cov(x,y):
model = GraphicalLasso()
ones =(y==1 ).astype(bool)
x2 = x[ones]
model.fit(x2)
p1 = model.precision_
m1 = model.location_
onesb =(y==0 ).astype(bool)
x2b = x[onesb]
model.fit(x2b)
p2 = model.precision_
m2 = model.location_
ms = np.stack([m1,m2])
ps = np.stack([p1,p2])
return ms,ps
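# Note: get_mean_cov fits a GraphicalLasso separately to the class-1 and class-0
# rows and returns the stacked class means (ms) and precision matrices (ps).
# They are used below as means_init / precisions_init for a 2-component
# GaussianMixture, so EM starts from class-conditional Gaussian estimates.
# Hedged usage sketch (x3 / y3 are illustrative names for one
# wheezy-copper-turtle-magic subset, not variables defined in this cell):
# ms, ps = get_mean_cov(x3, y3)
# gm = GaussianMixture(n_components=2, means_init=ms, precisions_init=ps).fit(x3)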
|
Instant Gratification
|
4,362,670 |
preds, targets = learn.get_preds()
predictions = np.argmax(preds, axis = 1)
pd.crosstab(predictions, targets )<predict_on_test>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3, train2['target']):
ms, ps = get_mean_cov(train3[train_index,:],train2.loc[train_index]['target'].values)
gm = GaussianMixture(n_components=2, init_params='random', covariance_type='full', tol=0.001,reg_covar=0.001, max_iter=100, n_init=1,means_init=ms, precisions_init=ps)
gm.fit(np.concatenate([train3,test3],axis = 0))
oof[idx1[test_index]] = gm.predict_proba(train3[test_index,:])[:,0]
preds[idx2] += gm.predict_proba(test3)[:,0] / skf.n_splits
auc = roc_auc_score(train['target'],oof)
print('QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,362,670 |
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1 )<save_to_csv>
|
cat_dict = dict()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
cat_dict[i] = train3.shape[1]
|
Instant Gratification
|
4,362,670 |
sub_df = pd.DataFrame({'PassengerId': test_df['PassengerId'], 'Survived': labels})
sub_df.to_csv('submission.csv', index=False )<set_options>
|
test['target'] = preds
oof_qda = np.zeros(len(train))
preds_qda = np.zeros(len(test))
oof_knn = np.zeros(len(train))
preds_knn = np.zeros(len(test))
oof_svnu = np.zeros(len(train))
preds_svnu = np.zeros(len(test))
oof_svc = np.zeros(len(train))
preds_svc = np.zeros(len(test))
oof_rf = np.zeros(len(train))
preds_rf = np.zeros(len(test))
oof_mlp = np.zeros(len(train))
preds_mlp = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
pca = PCA(n_components=cat_dict[k], random_state= 1234)
pca.fit(train2p[cols])
train3p = pca.transform(train2p[cols])
train3 = pca.transform(train2[cols])
test3 = pca.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_qda[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_qda[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_knn[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_knn[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = NuSVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=4, nu=0.59, coef0=0.053)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_svnu[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_svnu[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = svm.SVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=42)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_svc[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_svc[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = RandomForestClassifier(n_estimators=100,random_state=1)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_rf[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_rf[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = neural_network.MLPClassifier(random_state=3, activation='relu', solver='lbfgs', tol=1e-06, hidden_layer_sizes=(250,))
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_mlp[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_mlp[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
if k%32==0: print(k)
auc = roc_auc_score(train['target'],oof_qda)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
auc = roc_auc_score(train['target'],oof_knn)
print('Pseudo Labeled KNN scores CV =',round(auc,5))
auc = roc_auc_score(train['target'],oof_svnu)
print('Pseudo Labeled SVNU scores CV =',round(auc,5))
auc = roc_auc_score(train['target'],oof_svc)
print('Pseudo Labeled SVC scores CV =',round(auc,5))
auc = roc_auc_score(train['target'],oof_rf)
print('Pseudo Labeled RF scores CV =',round(auc,5))
auc = roc_auc_score(train['target'],oof_mlp)
print('Pseudo Labeled MLP scores CV =',round(auc,5))
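# Note: the loop above is a pseudo-labelling pass. Confident test predictions
# (target <= 0.01 or >= 0.99) are rounded to hard 0/1 labels and appended to the
# training rows of the same wheezy-copper-turtle-magic bucket, the bucket is
# projected with PCA(n_components=cat_dict[k]), and QDA / KNN / NuSVC / SVC /
# RF / MLP are refitted per fold. test_index3 keeps only fold indices that fall
# inside the original train rows, so out-of-fold scores are never written for
# the pseudo-labelled test rows.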
|
Instant Gratification
|
4,362,670 |
warnings.filterwarnings('ignore' )<load_from_csv>
|
test['target'] = preds
oof_qda2 = np.zeros(len(train))
preds_qda2 = np.zeros(len(test))
oof_knn2 = np.zeros(len(train))
preds_knn2 = np.zeros(len(test))
oof_svnu2 = np.zeros(len(train))
preds_svnu2 = np.zeros(len(test))
oof_svc2 = np.zeros(len(train))
preds_svc2 = np.zeros(len(test))
oof_rf2 = np.zeros(len(train))
preds_rf2 = np.zeros(len(test))
oof_mlp2 = np.zeros(len(train))
preds_mlp2 = np.zeros(len(test))
for k in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = test[test['wheezy-copper-turtle-magic']==k]
test2p = test2[(test2['target']<=0.01)|(test2['target']>=0.99)].copy()
test2p.loc[ test2p['target']>=0.5, 'target' ] = 1
test2p.loc[ test2p['target']<0.5, 'target' ] = 0
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p, train2p['target']):
test_index3 = test_index[ test_index<len(train3)]
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_qda2[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_qda2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_knn2[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_knn2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = NuSVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=4, nu=0.59, coef0=0.053)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_svnu2[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_svnu2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = svm.SVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=42)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_svc2[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_svc2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = RandomForestClassifier(n_estimators=100,random_state=1)
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_rf2[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_rf2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = neural_network.MLPClassifier(random_state=3, activation='relu', solver='lbfgs', tol=1e-06, hidden_layer_sizes=(250,))
clf.fit(train3p[train_index,:],train2p.loc[train_index]['target'])
oof_mlp2[idx1[test_index3]] = clf.predict_proba(train3[test_index3,:])[:,1]
preds_mlp2[test2.index] += clf.predict_proba(test3)[:,1] / skf.n_splits
if k%32==0: print(k)
auc = roc_auc_score(train['target'],oof_qda2)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
print('----------------')
print('knn', roc_auc_score(train['target'], oof_knn2))
print('svc', roc_auc_score(train['target'], oof_svc2))
print('svnu', roc_auc_score(train['target'], oof_svnu2))
print('rf', roc_auc_score(train['target'], oof_rf2))
print('mlp', roc_auc_score(train['target'], oof_mlp2))
|
Instant Gratification
|
4,362,670 |
class ReadCsvData(object):
def __init__(self):
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
self._train = df_train.drop(['label'], axis=1 ).values
self._labels = df_train['label'].values
self._test = df_test.values
def get_data(self):
self._train = self._train.astype(np.float32)
self._train = np.multiply(self._train, 1.0 / 255.0)
self._test = self._test.astype(np.float32)
self._test = np.multiply(self._test, 1.0 / 255.0)
self._labels = np.identity(10)[self._labels]
return self._train, self._labels,self._test<define_variables>
|
Instant Gratification
|
|
4,362,670 |
class DataSet(object):
def __init__(self,
images,
labels):
assert images.shape[0] == labels.shape[0],('images.shape: %s labels.shape: %s' %(images.shape, labels.shape))
self._num_examples = images.shape[0]
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
def next_batch(self, batch_size):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
self._epochs_completed += 1
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]<prepare_x_and_y>
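# Note: next_batch walks the arrays in fixed-size slices; once a slice would run
# past the end it applies one random permutation to images and labels together
# (keeping them aligned), increments _epochs_completed and restarts from index 0.
# Hedged usage sketch (x_train / y_label come from ReadCsvData above; the batch
# size of 100 is illustrative):
# ds = DataSet(x_train, y_label)
# batch_x, batch_y = ds.next_batch(100)   # two aligned arrays of 100 rows each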
|
auc = roc_auc_score(train['target'],oof_qda2*0.6+oof_svnu2*0.25 + oof_svc2*0.05 +oof_rf2*0.1)
print('Pseudo Labeled BLEND scores CV =',round(auc,5))
|
Instant Gratification
|
4,362,670 |
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 50], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[50]))<define_search_space>
|
auc = roc_auc_score(train['target'],oof_qda2*0.5+oof_svnu2*0.3 + oof_svc2*0.05 + oof_knn2*0.025 + oof_rf2*0.1 + oof_mlp2*0.025)
print('Pseudo Labeled BLEND2 scores CV =',round(auc,5))
|
Instant Gratification
|
4,362,670 |
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME')+ b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME' )<init_hyperparams>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = preds_qda2
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,362,670 |
W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 50, 100], stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[100]))
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME')+ b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME' )<concatenate>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = preds_qda2*0.6+preds_svnu2*0.25 + preds_svc2*0.05 +preds_rf2*0.1
sub.to_csv('submission_blend.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Blend Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,362,670 |
<categorify><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = preds_qda2*0.5+preds_svnu2*0.3 + preds_svc2*0.05 + preds_knn2*0.025 + preds_rf2*0.1 + preds_mlp2*0.025
sub.to_csv('submission_blend2.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Blend2 Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,308,791 |
<SOS> metric: AUC Kaggle data source: instant-gratification<prepare_x_and_y>
|
sns.set_style('darkgrid')
pd.options.display.float_format = '{:,.3f}'.format
print(os.listdir("../input"))
|
Instant Gratification
|
4,308,791 |
W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2)+ b_fc2 )<compute_train_metric>
|
%%time
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(train.shape, test.shape )
|
Instant Gratification
|
4,308,791 |
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4 ).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
predict = tf.argmax(y_conv,1 )<features_selection>
|
null_cnt = train.isnull().sum().sort_values()
print('null count:', null_cnt[null_cnt > 0] )
|
Instant Gratification
|
4,308,791 |
sess.run(tf.global_variables_initializer() )<load_from_csv>
|
print(train['wheezy-copper-turtle-magic'].describe())
print()
print('unique value count:', train['wheezy-copper-turtle-magic'].nunique() )
|
Instant Gratification
|
4,308,791 |
input_data=ReadCsvData()
x_train, y_label, x_test = input_data.get_data()<define_variables>
|
numcols = train.drop(['id','target','wheezy-copper-turtle-magic'],axis=1 ).select_dtypes(include='number' ).columns.values
|
Instant Gratification
|
4,308,791 |
Batch_size=100
Train_Number=5300
accuracies = []<train_model>
|
X_subset = train[train['wheezy-copper-turtle-magic'] == 0][numcols]
Y_subset = train[train['wheezy-copper-turtle-magic'] == 0]['target']
for k in range(2, 10):
knc = KNeighborsClassifier(n_neighbors=k)
knc.fit(X_subset, Y_subset)
score = knc.score(X_subset, Y_subset)
print("[{}] score: {:.2f}".format(k, score))
|
Instant Gratification
|
4,308,791 |
print('Start Learning', datetime.now() ,)
for j in range(3):
train_dataset = DataSet(x_train,y_label)
for i in range(Train_Number):
batch_x, batch_y = train_dataset.next_batch(Batch_size)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={x:batch_x, y_: batch_y, rate: 0.0})
accuracies.append(train_accuracy)
print("step %d, training accuracy %g"%(i, train_accuracy))
train_step.run(feed_dict={x: batch_x, y_: batch_y, rate: 0.5})
print("step %d, training accuracy %g"%(i, train_accuracy))
print('Finish Learning', datetime.now() , )<save_to_csv>
|
all_data = train.append(test, sort=False ).reset_index(drop=True)
del train, test
gc.collect()
all_data.head()
|
Instant Gratification
|
4,308,791 |
submission_file=pd.DataFrame({'ImageId':np.arange(1,(x_test.shape[0] + 1)) , 'Label':predict.eval(feed_dict={x: x_test, rate: 0.0})})
print(submission_file)
submission_file.to_csv("submission_v1.csv", index=False)
print('Save submission', datetime.now() , )<load_from_csv>
|
constant_column = [col for col in all_data.columns if all_data[col].nunique() == 1]
print('drop columns:', constant_column)
all_data.drop(constant_column, axis=1, inplace=True )
|
Instant Gratification
|
4,308,791 |
data_folder = Path("../input")
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/sample_submission.csv")
test_img = ImageList.from_df(test_df, path=data_folder/'test', folder='test')
trfm = get_transforms(do_flip=True, flip_vert=True, max_rotate=10.0, max_zoom=1.1, max_lighting=0.2, max_warp=0.2, p_affine=0.75, p_lighting=0.75)
train_img =(ImageList.from_df(train_df, path=data_folder/'train', folder='train')
.split_by_rand_pct(0.01)
.label_from_df()
.add_test(test_img)
.transform(trfm, size=128)
.databunch(path='.', bs=64, device= torch.device('cuda:0'))
.normalize(imagenet_stats)
)
learn = cnn_learner(train_img, models.densenet161, metrics=[error_rate, accuracy] )<train_model>
|
corr_matrix = all_data.corr().abs()
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
to_drop = [c for c in upper.columns if any(upper[c] > 0.95)]
del upper
drop_column = all_data.columns[to_drop]
print('drop columns:', drop_column)
|
Instant Gratification
|
4,308,791 |
lr = 3e-02
learn.fit_one_cycle(5, slice(lr))<save_to_csv>
|
X_train = all_data[all_data['target'].notnull() ].reset_index(drop=True)
X_test = all_data[all_data['target'].isnull() ].drop(['target'], axis=1 ).reset_index(drop=True)
del all_data
gc.collect()
X_train.drop(['id'], axis=1, inplace=True)
X_test_ID = X_test.pop('id')
Y_train = X_train.pop('target')
print(X_train.shape, X_test.shape )
|
Instant Gratification
|
4,308,791 |
preds,_ = learn.get_preds(ds_type=DatasetType.Test)
test_df.has_cactus = preds.numpy() [:, 0]
test_df.to_csv('submission.csv', index=False )<set_options>
|
oof_preds = np.zeros(X_train.shape[0])
sub_preds = np.zeros(X_test.shape[0])
splits = 11
for i in range(512):
train2 = X_train[X_train['wheezy-copper-turtle-magic'] == i][numcols]
train2_y = Y_train[X_train['wheezy-copper-turtle-magic'] == i]
test2 = X_test[X_test['wheezy-copper-turtle-magic'] == i][numcols]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5)
train2 = sel.fit_transform(train2)
test2 = sel.transform(test2)
skf = StratifiedKFold(n_splits=splits, random_state=42)
for train_index, test_index in skf.split(train2, train2_y):
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train2[train_index], train2_y.iloc[train_index])
oof_preds[idx1[test_index]] = clf.predict_proba(train2[test_index])[:,1]
sub_preds[idx2] += clf.predict_proba(test2)[:,1] / skf.n_splits
|
Instant Gratification
|
4,308,791 |
%matplotlib inline
sns.set(style='white', context='notebook', palette='deep' )<load_from_csv>
|
len(X_train[(oof_preds > 0.3)&(oof_preds < 0.7)] )
|
Instant Gratification
|
4,308,791 |
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
IDtest = test["PassengerId"]<drop_column>
|
X_train = X_train[(oof_preds <= 0.3)|(oof_preds >= 0.7)]
Y_train = Y_train[(oof_preds <= 0.3)|(oof_preds >= 0.7)]
|
Instant Gratification
|
4,308,791 |
train = train.drop(Outliers_to_drop, axis = 0 ).reset_index(drop=True )<concatenate>
|
X_test_p1 = X_test[(sub_preds <= 0.01)].copy()
X_test_p2 = X_test[(sub_preds >= 0.99)].copy()
X_test_p1['target'] = 0
X_test_p2['target'] = 1
print(X_test_p1.shape, X_test_p2.shape)
Y_train = pd.concat([Y_train, X_test_p1.pop('target'), X_test_p2.pop('target')], axis=0)
X_train = pd.concat([X_train, X_test_p1, X_test_p2], axis=0)
Y_train.reset_index(drop=True, inplace=True)
X_train.reset_index(drop=True, inplace=True )
|
Instant Gratification
|
4,308,791 |
train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0 ).reset_index(drop=True )<count_missing_values>
|
for i in range(512):
train_f =(X_train['wheezy-copper-turtle-magic'] == i)
test_f =(X_test['wheezy-copper-turtle-magic'] == i)
X_train_sub = X_train[train_f][numcols]
Y_train_sub = Y_train[train_f]
X_test_sub = X_test[test_f][numcols]
lda = LDA(n_components=1)
lda.fit(X_train_sub, Y_train_sub)
X_train.loc[train_f, 'lda'] = lda.transform(X_train_sub ).reshape(-1)
X_test.loc[test_f, 'lda'] = lda.transform(X_test_sub ).reshape(-1)
knc = KNeighborsClassifier(n_neighbors=3)
knc.fit(X_train_sub, Y_train_sub)
X_train.loc[train_f, 'knc'] = knc.predict_proba(X_train_sub)[:,1]
X_test.loc[test_f, 'knc'] = knc.predict_proba(X_test_sub)[:,1]
|
Instant Gratification
|
4,308,791 |
dataset = dataset.fillna(np.nan)
dataset.isnull().sum()<feature_engineering>
|
oof_preds = np.zeros(X_train.shape[0])
sub_preds = np.zeros(X_test.shape[0])
splits = 11
for i in range(512):
train2 = X_train[X_train['wheezy-copper-turtle-magic'] == i][numcols]
train2_y = Y_train[X_train['wheezy-copper-turtle-magic'] == i]
test2 = X_test[X_test['wheezy-copper-turtle-magic'] == i][numcols]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5)
train2 = sel.fit_transform(train2)
test2 = sel.transform(test2)
skf = StratifiedKFold(n_splits=splits, random_state=42)
for train_index, test_index in skf.split(train2, train2_y):
clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
clf.fit(train2[train_index], train2_y.iloc[train_index])
oof_preds[idx1[test_index]] = clf.predict_proba(train2[test_index])[:,1]
sub_preds[idx2] += clf.predict_proba(test2)[:,1] / skf.n_splits
|
Instant Gratification
|
4,308,791 |
<feature_engineering><EOS>
|
submission = pd.DataFrame({
'id': X_test_ID,
'target': sub_preds
})
submission.to_csv("submission.csv", index=False )
|
Instant Gratification
|
4,399,207 |
<SOS> metric: AUC Kaggle data source: instant-gratification<data_type_conversions>
|
%matplotlib inline
np.random.seed(1111)
warnings.filterwarnings('ignore' )
|
Instant Gratification
|
4,399,207 |
dataset["Embarked"] = dataset["Embarked"].fillna("S" )<categorify>
|
train = pd.read_csv('../input/instant-gratification/train.csv')
test = pd.read_csv('../input/instant-gratification/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
|
Instant Gratification
|
4,399,207 |
dataset["Sex"] = dataset["Sex"].map({"male": 0, "female":1} )<feature_engineering>
|
gm_list = pickle.load(open('../input/models-v5/gm_models_v5.pkl', 'rb'))
|
Instant Gratification
|
4,399,207 |
index_NaN_age = list(dataset["Age"][dataset["Age"].isnull() ].index)
for i in index_NaN_age :
age_med = dataset["Age"].median()
age_pred = dataset["Age"][(( dataset['SibSp'] == dataset.iloc[i]["SibSp"])&(dataset['Parch'] == dataset.iloc[i]["Parch"])&(dataset['Pclass'] == dataset.iloc[i]["Pclass"])) ].median()
if not np.isnan(age_pred):
dataset['Age'].iloc[i] = age_pred
else :
dataset['Age'].iloc[i] = age_med<feature_engineering>
|
class MyGM(GaussianMixture):
def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
weights_init=None, means_init=None, precisions_init=None,
random_state=None, warm_start=False,
verbose=0, verbose_interval=10, init_clusters=None, y=None):
super().__init__(
n_components=n_components, tol=tol, reg_covar=reg_covar,
max_iter=max_iter, n_init=n_init, init_params=init_params,
random_state=random_state, warm_start=warm_start,
verbose=verbose, verbose_interval=verbose_interval)
self.init_clusters_ = np.asarray(init_clusters ).astype('int')
self.y = y
def _initialize_parameters(self, X, random_state):
n_samples, _ = X.shape
if self.init_params == 'kmeans':
resp = np.zeros(( n_samples, self.n_components))
label = cluster.KMeans(n_clusters=self.n_components, n_init=1,
random_state=random_state ).fit(X ).labels_
resp[np.arange(n_samples), label] = 1
elif self.init_params == 'random':
resp = random_state.rand(n_samples, self.n_components)
resp /= resp.sum(axis=1)[:, np.newaxis]
elif self.init_params == 'clusters':
resp = np.zeros(( n_samples, self.n_components))
resp[np.arange(self.init_clusters_.shape[0]), self.init_clusters_] = 1
else:
raise ValueError("Unimplemented initialization method '%s'"
% self.init_params)
self._initialize(X, resp)
def estimate_log_ratio(self, X):
weighted_log_prob = self._estimate_weighted_log_prob(X)
return logsumexp(weighted_log_prob[:, 1::2], axis=1)- logsumexp(weighted_log_prob[:, 0::2], axis=1)
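# Note: MyGM extends GaussianMixture with an init_params == 'clusters' mode in
# which the responsibility matrix is one-hot encoded from a caller-supplied
# cluster assignment (init_clusters_) instead of k-means or random init.
# estimate_log_ratio returns log p(x | odd-indexed components) minus
# log p(x | even-indexed components), i.e. a class log-likelihood ratio under
# the convention that odd-indexed components model the positive class.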
|
Instant Gratification
|
4,399,207 |
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in dataset["Name"]]
dataset["Title"] = pd.Series(dataset_title)
dataset["Title"].head()<categorify>
|
class MyBGM(BayesianGaussianMixture):
def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=None,
mean_precision_prior=None, mean_prior=None,
degrees_of_freedom_prior=None, covariance_prior=None,
random_state=None, warm_start=False, verbose=0,
verbose_interval=10, init_clusters=None):
super().__init__(
n_components=n_components, covariance_type=covariance_type, tol=tol,
reg_covar=reg_covar, max_iter=max_iter, n_init=n_init, init_params=init_params,
weight_concentration_prior_type=weight_concentration_prior_type,
weight_concentration_prior=weight_concentration_prior,
mean_precision_prior=mean_precision_prior, mean_prior=mean_prior,
degrees_of_freedom_prior=degrees_of_freedom_prior, covariance_prior=covariance_prior,
random_state=random_state, warm_start=warm_start, verbose=verbose,
verbose_interval=verbose_interval)
self.init_clusters_ = np.asarray(init_clusters ).astype('int')
def _initialize_parameters(self, X, random_state):
n_samples, _ = X.shape
if self.init_params == 'kmeans':
resp = np.zeros(( n_samples, self.n_components))
label = cluster.KMeans(n_clusters=self.n_components, n_init=1,
random_state=random_state ).fit(X ).labels_
resp[np.arange(n_samples), label] = 1
elif self.init_params == 'random':
resp = random_state.rand(n_samples, self.n_components)
resp /= resp.sum(axis=1)[:, np.newaxis]
elif self.init_params == 'clusters':
resp = np.zeros(( n_samples, self.n_components))
resp[np.arange(self.init_clusters_.shape[0]), self.init_clusters_] = 1
elif self.init_params == 'proba':
resp = self.init_proba_.copy()
resp[np.arange(self.init_clusters_.shape[0]), self.init_clusters_] = 1
resp /= resp.sum(axis=1)[:, np.newaxis]
else:
raise ValueError("Unimplemented initialization method '%s'"
% self.init_params)
self._initialize(X, resp )
|
Instant Gratification
|
4,399,207 |
dataset["Title"] = dataset["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset["Title"] = dataset["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
dataset["Title"] = dataset["Title"].astype(int )<drop_column>
|
roc_auc_score(train["target"], oof_preds )
|
Instant Gratification
|
4,399,207 |
<feature_engineering><EOS>
|
sub = pd.read_csv('../input/instant-gratification/sample_submission.csv')
sub['target'] = test_preds
sub.to_csv('submission.csv',index=False )
|
Instant Gratification
|
4,394,358 |
<SOS> metric: AUC Kaggle data source: instant-gratification<categorify>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
print(train.shape, test.shape)
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
for itr in range(4):
test['target'] = preds
test.loc[test['target'] > 0.955, 'target'] = 1
test.loc[test['target'] < 0.045, 'target'] = 0
usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
new_train = pd.concat([train, usefull_test] ).reset_index(drop=True)
print(usefull_test.shape[0], "Test Records added for iteration : ", itr)
print(new_train.head(100))
|
Instant Gratification
|
4,394,358 |
dataset['Single'] = dataset['Fsize'].map(lambda s: 1 if s == 1 else 0)
dataset['SmallF'] = dataset['Fsize'].map(lambda s: 1 if s == 2 else 0)
dataset['MedF'] = dataset['Fsize'].map(lambda s: 1 if 3 <= s <= 4 else 0)
dataset['LargeF'] = dataset['Fsize'].map(lambda s: 1 if s >= 5 else 0 )<categorify>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
print(train.shape, test.shape)
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
for itr in range(4):
test['target'] = preds
test.loc[test['target'] > 0.94, 'target'] = 1
test.loc[test['target'] < 0.06, 'target'] = 0
usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
new_train = pd.concat([train, usefull_test] ).reset_index(drop=True)
print(usefull_test.shape[0], "Test Records added for iteration : ", itr)
new_train.loc[oof > 0.98, 'target'] = 1
new_train.loc[oof < 0.02, 'target'] = 0
oof3 = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)) :
train2 = new_train[new_train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train[train['wheezy-copper-turtle-magic']==i].index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
oof_test_index = [t for t in test_index if t < len(idx1)]
clf = QuadraticDiscriminantAnalysis(0.5)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
if len(oof_test_index)> 0:
oof3[idx1[oof_test_index]] = clf.predict_proba(train3[oof_test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof3)
print(f'AUC: {auc:.5}')
sub2 = pd.read_csv('../input/sample_submission.csv')
sub2['target'] = preds
|
Instant Gratification
|
4,394,358 |
dataset = pd.get_dummies(dataset, columns = ["Title"])
dataset = pd.get_dummies(dataset, columns = ["Embarked"], prefix="Em" )<feature_engineering>
|
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
print(train.shape, test.shape)
oof = np.zeros(len(train))
preds = np.zeros(len(test))
params = [{'reg_param': [0.1, 0.2, 0.3, 0.4, 0.5]}]
reg_params = np.zeros(512)
for i in tqdm_notebook(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
qda = QuadraticDiscriminantAnalysis()
clf = GridSearchCV(qda, params, cv=4)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
reg_params[i] = clf.best_params_['reg_param']
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print(f'AUC: {auc:.5}')
for itr in range(10):
test['target'] = preds
test.loc[test['target'] > 0.955, 'target'] = 1
test.loc[test['target'] < 0.045, 'target'] = 0
usefull_test = test[(test['target'] == 1)|(test['target'] == 0)]
new_train = pd.concat([train, usefull_test] ).reset_index(drop=True)
print(usefull_test.shape[0], "Test Records added for iteration : ", itr)
new_train.loc[oof > 0.995, 'target'] = 1
new_train.loc[oof < 0.005, 'target'] = 0
oof4 = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)) :
train2 = new_train[new_train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train[train['wheezy-copper-turtle-magic']==i].index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=2)) ,('scaler', StandardScaler())])
data2 = pipe.fit_transform(data[cols])
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
skf = StratifiedKFold(n_splits=11, random_state=int(time.time()))  # cast to int: sklearn rejects a function as random_state
for train_index, test_index in skf.split(train2, train2['target']):
oof_test_index = [t for t in test_index if t < len(idx1)]
clf = QuadraticDiscriminantAnalysis(reg_params[i])
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
if len(oof_test_index)> 0:
oof4[idx1[oof_test_index]] = clf.predict_proba(train3[oof_test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
auc = roc_auc_score(train['target'], oof4)
print(f'AUC: {auc:.5}')
sub3 = pd.read_csv('../input/sample_submission.csv')
sub3['target'] = preds
|
Instant Gratification
|
4,394,358 |
dataset["Cabin"] = pd.Series([i[0] if not pd.isnull(i)else 'X' for i in dataset['Cabin'] ] )<categorify>
|
print('CV score ensemble=',round(roc_auc_score(train['target'],oof2*0.35 + oof3*0.25+ oof4*0.4),5))
|
Instant Gratification
|
4,394,358 |
dataset = pd.get_dummies(dataset, columns = ["Cabin"],prefix="Cabin" )<feature_engineering>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub.head()
|
Instant Gratification
|
4,394,358 |
Ticket = []
for i in list(dataset.Ticket):
if not i.isdigit() :
Ticket.append(i.replace(".","" ).replace("/","" ).strip().split(' ')[0])
else:
Ticket.append("X")
dataset["Ticket"] = Ticket
dataset["Ticket"].head()<categorify>
|
sub['target'] = 1/3*sub1.target + 1/3*sub2.target + 1/3*sub3.target
|
Instant Gratification
|
4,394,358 |
dataset = pd.get_dummies(dataset, columns = ["Ticket"], prefix="T" )<categorify>
|
sub.to_csv('submission.csv', index = False)
sub.head()
|
Instant Gratification
|
4,424,337 |
dataset["Pclass"] = dataset["Pclass"].astype("category")
dataset = pd.get_dummies(dataset, columns = ["Pclass"],prefix="Pc" )<drop_column>
|
def permute_predict(y):
_y = y.copy()
_c1 = _y < 0.00001
_c2 = _y > 0.99999
_y[_c1] = _y[_c1].max() - _y[_c1] + _y[_c1].min()
_y[_c2] = _y[_c2].max() - _y[_c2] + _y[_c2].min()
return _y
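# Note: permute_predict only touches predictions pinned to the extremes
# (< 0.00001 or > 0.99999). Within each extreme group it applies
# y -> max - y + min, which reverses the ordering inside the group while keeping
# every value inside the group's original range, so the ranking relative to all
# other predictions is unchanged.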
|
Instant Gratification
|
4,424,337 |
dataset.drop(labels = ["PassengerId"], axis = 1, inplace = True )<drop_column>
|
warnings.filterwarnings('ignore' )
|
Instant Gratification
|
4,424,337 |
train = dataset[:train_len]
test = dataset[train_len:]
test.drop(labels=["Survived"],axis = 1,inplace=True )<prepare_x_and_y>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
|
Instant Gratification
|
4,424,337 |
train["Survived"] = train["Survived"].astype(int)
Y_train = train["Survived"]
X_train = train.drop(labels = ["Survived"],axis = 1 )<choose_model_class>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic' )
|
Instant Gratification
|
4,424,337 |
lr = LogisticRegression()<import_modules>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
data2 = VarianceThreshold(2.3 ).fit_transform(data[cols])
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
for c in range(train3.shape[1]):
low_=np.quantile(train3[:,c] , 0.001)
up_=np.quantile(train3[:,c], 0.999)
train3[:,c]=np.clip(train3[:,c],low_, up_)
test3[:,c]=np.clip(test3[:,c],low_, up_)
skf = StratifiedKFold(n_splits=11, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
gmm=GMM(n_components=5, random_state=42, covariance_type='full')
gmm.fit(np.vstack([train3[train_index], test3]))
gmm_1_train=gmm.predict_proba(train3[train_index])
gmm_1_val=gmm.predict_proba(train3[test_index])
gmm_1_test=gmm.predict_proba(test3)
gmm=GMM(n_components=4, random_state=42, covariance_type='full')
gmm.fit(np.vstack([train3[train_index], test3]))
gmm_2_train=gmm.predict_proba(train3[train_index])
gmm_2_val=gmm.predict_proba(train3[test_index])
gmm_2_test=gmm.predict_proba(test3)
gmm=GMM(n_components=6, random_state=42, covariance_type='full')
gmm.fit(np.vstack([train3[train_index], test3]))
gmm_3_train=gmm.predict_proba(train3[train_index])
gmm_3_val=gmm.predict_proba(train3[test_index])
gmm_3_test=gmm.predict_proba(test3)
bgm=BGM(n_components=5, random_state=42)
bgm.fit(np.vstack([train3[train_index], test3]))
bgm_1_train=bgm.predict_proba(train3[train_index])
bgm_1_val=bgm.predict_proba(train3[test_index])
bgm_1_test=bgm.predict_proba(test3)
bgm=BGM(n_components=4, random_state=42)
bgm.fit(np.vstack([train3[train_index], test3]))
bgm_2_train=bgm.predict_proba(train3[train_index])
bgm_2_val=bgm.predict_proba(train3[test_index])
bgm_2_test=bgm.predict_proba(test3)
bgm=BGM(n_components=6, random_state=42)
bgm.fit(np.vstack([train3[train_index], test3]))
bgm_3_train=bgm.predict_proba(train3[train_index])
bgm_3_val=bgm.predict_proba(train3[test_index])
bgm_3_test=bgm.predict_proba(test3)
_train = np.hstack(( train3[train_index],
gmm_1_train, gmm_2_train, gmm_3_train,
bgm_1_train, bgm_2_train, bgm_3_train))
_val = np.hstack(( train3[test_index],
gmm_1_val, gmm_2_val, gmm_3_val,
bgm_1_val, bgm_2_val, bgm_3_val))
_test = np.hstack(( test3,
gmm_1_test, gmm_2_test, gmm_3_test,
bgm_1_test, bgm_2_test, bgm_3_test))
clf = QuadraticDiscriminantAnalysis(reg_param=0.04, tol=0.01)
clf.fit(_train,train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(_val)[:,1]
preds[idx2] += clf.predict_proba(_test)[:,1] / skf.n_splits
print(i, roc_auc_score(train2['target'], oof[idx1]))
print(roc_auc_score(train['target'], oof))
|
Instant Gratification
|
4,424,337 |
from sklearn.naive_bayes import GaussianNB<choose_model_class>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = permute_predict(preds)
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,388,308 |
gnb = GaussianNB()<import_modules>
|
%matplotlib inline
|
Instant Gratification
|
4,388,308 |
from keras.models import Sequential<import_modules>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
|
Instant Gratification
|
4,388,308 |
from keras.layers import Dense,BatchNormalization,Dropout<import_modules>
|
def data_augmentation(X, y):
mean_train2_0 = X[y==0].mean(axis=0)  # per-feature class means (axis=0), so reshape(1,-1) below yields row vectors
mean_train2_1 = X[y==1].mean(axis=0)
train2_0 = 2*mean_train2_0.reshape(1,-1)- X[y==0]
train2_1 = 2*mean_train2_1.reshape(1,-1)- X[y==1]
tmp_train2_0 = np.vstack([X[y==0], train2_0])
tmp_train2_1 = np.vstack([X[y==1], train2_1])
train2 = np.vstack([tmp_train2_0, tmp_train2_1])
y2 = np.array([0]*len(tmp_train2_0)+ [1]*len(tmp_train2_1))
return np.vstack([X, train2]), np.concatenate([y, y2])
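# Note: data_augmentation reflects every row through its class mean
# (x' = 2 * mean_c - x), making each class distribution symmetric about its
# mean; the returned arrays hold the original rows plus the reflected ones
# (originals end up included twice, which only reweights them).
# Hedged sketch with toy inputs (illustrative only):
# X_aug, y_aug = data_augmentation(np.array([[0., 1.], [2., 3.]]), np.array([0, 1]))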
|
Instant Gratification
|
4,388,308 |
from keras import callbacks<choose_model_class>
|
def get_kmeans_clusters(x, y, k_pos=1, k_neg=1):
x_zeros = x[y==0]
x_ones = x[y==1]
model_0 = KMeans(n_clusters=k_neg)
model_1 = KMeans(n_clusters=k_pos)
model_0.fit(x_zeros)
model_1.fit(x_ones)
model_0_clus = [x_zeros[model_0.labels_==k] for k in range(model_0.n_clusters)]
model_1_clus = [x_ones[model_1.labels_==k] for k in range(model_1.n_clusters)]
return model_1_clus + model_0_clus
def fit_multicluster_gmm(x, y, xt, k_pos, k_neg, max_iter=100):
clusters = get_kmeans_clusters(x, y, k_pos=k_pos, k_neg=k_neg)
for i in range(len(clusters)) :
x_cluster = clusters[i]
model = ShrunkCovariance()
model.fit(x_cluster)
if(i==0):
ps = np.expand_dims(model.precision_, axis=0)
ms = np.expand_dims(model.location_, axis=0)
else:
ps = np.concatenate([ps, np.expand_dims(model.precision_, axis=0)], axis=0)
ms = np.concatenate([ms, np.expand_dims(model.location_, axis=0)], axis=0)
gm = mixture.GaussianMixture(n_components=k_pos+k_neg,
init_params='random',
covariance_type='full',
tol=0.001,
reg_covar=0.001,
max_iter=100,
n_init=5,
means_init=ms,
precisions_init=ps)
gm.fit(np.vstack((x.astype(float), xt.astype(float))))
preds = gm.predict_proba(x.astype(float))[:,0]
score = roc_auc_score(y, preds)
return score, gm, k_pos, k_neg
def get_mean_cov(x,y, model=GraphicalLasso() , max_iter=100):
try:
model.set_params(**{'max_iter':200})
except:
pass
ones =(y==1 ).astype(bool)
x2 = x[ones]
model.fit(x2)
p1 = model.precision_
m1 = model.location_
onesb =(y==0 ).astype(bool)
x2b = x[onesb]
model.fit(x2b)
p2 = model.precision_
m2 = model.location_
ms = np.stack([m1,m2])
ps = np.stack([p1,p2])
return ms,ps
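# Note: fit_multicluster_gmm first splits each class into KMeans clusters
# (k_pos positive, k_neg negative), fits a ShrunkCovariance per cluster to get
# a (location_, precision_) pair, and passes the stacked pairs as
# means_init / precisions_init to a GaussianMixture with k_pos + k_neg
# components fitted on train and test together. Positive clusters come first
# (model_1_clus + model_0_clus), so predict_proba column 0 corresponds to a
# positive-class component. The get_mean_cov defined here is the same
# two-Gaussian initializer as earlier, with a configurable covariance model.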
|
Instant Gratification
|
4,388,308 |
model = Sequential()<choose_model_class>
|
def extract_wheezy_copper_turtle_magic(train, i):
train2 = train[train['wheezy-copper-turtle-magic']==i].copy()
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
target = train2['target'].astype(int).values
train2.reset_index(drop=True, inplace=True)
return train2.drop(['id', 'target'], axis=1), test2.drop(['id'], axis=1), idx1, idx2, target
|
Instant Gratification
|
4,388,308 |
model.add(Dense(32,activation = "relu",input_shape =(66,)))
model.add(Dense(64,activation = "relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(1,activation = 'sigmoid'))<train_model>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
test_preds = np.zeros(len(test))
oof_gmm = np.zeros(len(train))
test_preds_gmm = np.zeros(len(test))
oof_gmm_2 = np.zeros(len(train))
test_preds_gmm_2 = np.zeros(len(test))
trials = 3
cat_dict = dict()
cluster_report = list()
for i in tqdm(range(512)) :
train2, test2, idx1, idx2, target = extract_wheezy_copper_turtle_magic(train, i)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
pipe = Pipeline([('vt', VarianceThreshold(threshold=1.5)) ])
train3 = pipe.fit_transform(train2[cols])
test3 = pipe.fit_transform(test2[cols])
data2 = StandardScaler().fit_transform(PCA(n_components=40, random_state=4 ).fit_transform(data[cols]))
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
cat_dict[i] = train3.shape[1]
try:
score, gm, k_pos, k_neg = fit_multicluster_gmm(x=train3, y=target, xt=test3, k_pos=1, k_neg=3)
except:
score, gm, k_pos, k_neg = fit_multicluster_gmm(x=train3, y=target, xt=test3, k_pos=1, k_neg=1)
oof_gmm[idx1] = gm.predict_proba(train3)[:,0]
test_preds_gmm[idx2] += gm.predict_proba(test3)[:,0]
try:
score, gm, k_pos, k_neg = fit_multicluster_gmm(x=train3, y=target, xt=test3, k_pos=3, k_neg=3)
except:
score, gm, k_pos, k_neg = fit_multicluster_gmm(x=train3, y=target, xt=test3, k_pos=1, k_neg=1)
clusters = gm.predict_proba(train3 ).shape[1]
oof_gmm_2[idx1] = np.sum(gm.predict_proba(train3)[:,:clusters//2], axis=1)
test_preds_gmm_2[idx2] += np.sum(gm.predict_proba(test3)[:,:clusters//2], axis=1 )
|
Instant Gratification
|
4,388,308 |
model.compile(optimizer = "adam",loss ="binary_crossentropy",metrics = ['accuracy'])
reduce_lr = callbacks.ReduceLROnPlateau(monitor='acc', factor=0.2,patience=3, min_lr=0.0001)
model.fit(X_train,Y_train,epochs = 30,callbacks = [reduce_lr] )<predict_on_test>
|
oof_auc_gmm = roc_auc_score(train['target'], oof_gmm)
print('OOF AUC: =',round(oof_auc_gmm, 5))
oof_auc_gmm_2 = roc_auc_score(train['target'], oof_gmm_2)
print('OOF AUC: =',round(oof_auc_gmm_2, 5))
oof_auc_blend = roc_auc_score(train['target'],(0.3*oof_gmm+0.7*oof_gmm_2))
print('OOF AUC: =',round(oof_auc_blend, 5))
|
Instant Gratification
|
4,388,308 |
<categorify><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = test_preds_gmm
sub.to_csv('submission_gmm.csv',index=False)
sub['target'] = test_preds_gmm_2
sub.to_csv('submission_gmm_2.csv',index=False)
sub['target'] =(test_preds_gmm + test_preds_gmm_2)/2
sub.to_csv('submission_blend.csv',index=False )
|
Instant Gratification
|
4,409,887 |
<SOS> metric: AUC Kaggle data source: instant-gratification<normalization>
|
import numpy as np, pandas as pd, os
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from sklearn.covariance import EmpiricalCovariance
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import sympy
|
Instant Gratification
|
4,409,887 |
y = sig(y)
y<create_dataframe>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
|
Instant Gratification
|
4,409,887 |
ans2 = pd.DataFrame(IDtest )<feature_engineering>
|
def get_mean_cov(x,y):
model = OAS()
ones =(y==1 ).astype(bool)
x2 = x[ones]
model.fit(x2)
p1 = model.precision_
m1 = model.location_
onesb =(y==0 ).astype(bool)
x2b = x[onesb]
model.fit(x2b)
p2 = model.precision_
m2 = model.location_
ms = np.stack([m1,m2])
ps = np.stack([p1,p2])
return ms,ps
|
Instant Gratification
|
4,409,887 |
ans2["Survived"] = y<save_to_csv>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm_notebook(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3, train2['target']):
x_train, y_train = train3[train_index,:], train2.loc[train_index]['target'].values
x_train_0 = x_train[y_train==0]
x_train_1 = x_train[y_train==1]
brc = Birch(branching_factor=50, n_clusters=3, threshold=0.4, compute_labels=True)
labels_0 = brc.fit_predict(x_train_0)
labels_1 = brc.fit_predict(x_train_1)
zero_mean = []
zero_cov = []
for l in np.unique(labels_0):
model = OAS()
model.fit(x_train_0[labels_0==l])
p = model.precision_
m = model.location_
zero_mean.append(m)
zero_cov.append(p)
one_mean = []
one_cov = []
for l in np.unique(labels_1):
model = OAS()
model.fit(x_train_1[labels_1==l])
p = model.precision_
m = model.location_
one_mean.append(m)
one_cov.append(p)
ms = np.stack(zero_mean + one_mean)
ps = np.stack(zero_cov + one_cov)
gm = GaussianMixture(n_components=6, init_params='random',
covariance_type='full', tol=0.001,reg_covar=0.001, max_iter=100, n_init=1, means_init=ms, precisions_init=ps)
gm.fit(np.concatenate([train3[train_index,:],test3],axis = 0))
oof[idx1[test_index]] = gm.predict_proba(train3[test_index,:])[:, 0:3].mean(axis=1)
preds[idx2] += gm.predict_proba(test3)[:, 0:3].mean(axis=1)/ skf.n_splits
print('AUC ', i, roc_auc_score(1- train2['target'], oof[idx1]))
auc = roc_auc_score(1 - train['target'],oof)
print('QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,409,887 |
ans2.to_csv("sub.csv",index = False )<data_type_conversions>
|
auc = roc_auc_score(1 - train['target'],oof)
print('QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,409,887 |
x_train = np.load('../input/reducing-image-sizes-to-32x32/X_train.npy')
x_test = np.load('../input/reducing-image-sizes-to-32x32/X_test.npy')
y_train = np.load('../input/reducing-image-sizes-to-32x32/y_train.npy')
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255.
x_test /= 255.<train_model>
|
x_test_0 = pd.read_csv('../input/test.csv')
x_test_0['target']=preds
|
Instant Gratification
|
4,409,887 |
datagen_train = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True)
datagen_train.fit(x_train )<import_modules>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic' )
|
Instant Gratification
|
4,409,887 |
from keras.applications import DenseNet121
from keras.layers import *
from keras.models import Sequential<choose_model_class>
|
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for k in tqdm_notebook(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==k]
train2p = train2.copy() ; idx1 = train2.index
test2 = x_test_0[x_test_0['wheezy-copper-turtle-magic']==k]
test2p = test2
train2p = pd.concat([train2p,test2p],axis=0)
train2p.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2p[cols])
train3p = sel.transform(train2p[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
skf = KFold(n_splits=17, random_state=42, shuffle=True)
for train_index, test_index in skf.split(train3p):
test_index3 = test_index[ test_index<len(train3)]
clf = neighbors.KNeighborsRegressor(n_neighbors=9, weights='distance')
clf.fit(train3p[train_index,:], train2p.loc[train_index]['target'])
oof[idx1[test_index3]] = clf.predict(train3[test_index3,:])
preds[test2.index] += clf.predict(test3)/ skf.n_splits
if k%64==0: print(k)
auc = roc_auc_score(train['target'], oof)
print('Pseudo Labeled QDA scores CV =',round(auc,5))
|
Instant Gratification
|
4,409,887 |
<choose_model_class><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = preds
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
4,275,635 |
<SOS> metric: AUC Kaggle data source: instant-gratification<choose_model_class>
|
PATH_BASE = Path('../input')
PATH_WORKING = Path('../working')
|
Instant Gratification
|
4,275,635 |
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'] )<train_model>
|
train = pd.read_csv(PATH_BASE/'train.csv')
test = pd.read_csv(PATH_BASE/'test.csv' )
|
Instant Gratification
|
4,275,635 |
str_ = 'Training Started'
os.system('echo '+str_ )<train_model>
|
def get_mean_cov(x,y):
model = GraphicalLasso(max_iter=200)
ones =(y==1 ).astype(bool)
x2 = x[ones]
model.fit(x2)
p1 = model.precision_
m1 = model.location_
onesb =(y==0 ).astype(bool)
x2b = x[onesb]
model.fit(x2b)
p2 = model.precision_
m2 = model.location_
ms = np.stack([m1,m2])
ps = np.stack([p1,p2])
return ms,ps
|
Instant Gratification
|
4,275,635 |
batch_size = 128
epochs = 25
checkpoint = ModelCheckpoint(
'model.h5',
monitor='val_acc',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto'
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=64,
epochs=10,
callbacks=[checkpoint],
validation_split=0.1
)<load_pretrained>
|
def projectMeans(means):
means[means>0]=1
means[means<=0]=-1
return means
def _compute_precision_cholesky(covariances, covariance_type):
estimate_precision_error_message =("Hell no")
if covariance_type in 'full':
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty(( n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True ).T
return precisions_chol
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
n_components, n_features = means.shape
covariances = np.empty(( n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff)/ nk[k]
covariances[k].flat[::n_features + 1] += reg_covar
return covariances
def _estimate_gaussian_parameters2(X, resp, reg_covar, covariance_type):
nk = resp.sum(axis=0)+ 10 * np.finfo(resp.dtype ).eps
means = np.dot(resp.T, X)/ nk[:, np.newaxis]
means = projectMeans(means)
covariances = {"full": _estimate_gaussian_covariances_full}[covariance_type](resp, X, nk, means, reg_covar)
return nk, means, covariances
class GaussianMixture2(GaussianMixture):
def _m_step(self, X, log_resp):
resp = np.exp(log_resp)
sums = resp.sum(0)
if sums.max() - sums.min() > 2:
for i in range(3):
resp = len(X)* resp / resp.sum(0)/ len(sums)
resp = resp/resp.sum(1)[:,None]
n_samples, _ = X.shape
self.weights_, self.means_, self.covariances_ =(
_estimate_gaussian_parameters2(X, resp, self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
random.seed(1234)
np.random.seed(1234)
os.environ['PYTHONHASHSEED'] = str(1234 )
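# Note: GaussianMixture2 overrides the M-step: projectMeans snaps every
# component mean coordinate to +1 or -1, and when one component starts to absorb
# far more responsibility mass than the others the responsibilities are
# rebalanced toward equal totals before the covariances and their Cholesky
# precisions are recomputed with the helpers above. The seeds set here
# (random, numpy, PYTHONHASHSEED) keep the otherwise random initialisation
# reproducible.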
|
Instant Gratification
|
4,275,635 |
model.load_weights('model.h5' )<predict_on_test>
|
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
N_RAND_INIT = 2
N_CLUST_OPT = 3
N_TEST = 1
all_acc = np.zeros(( 512, N_CLUST_OPT, N_RAND_INIT))
all_roc = np.zeros(( 512, N_CLUST_OPT, N_RAND_INIT))
cluster_cnt = np.zeros(( 512, N_CLUST_OPT, N_RAND_INIT))
j_selection = np.zeros(N_CLUST_OPT)
for i in tqdm(range(512)) :
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
sel = VarianceThreshold(threshold=1.5 ).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
test_index = range(len(train3))
yf = train2['target']
ms, ps = get_mean_cov(train3,yf)
cc_list = []
nc_list = 2*(np.array(range(N_CLUST_OPT)) + 2)
for j in range(N_CLUST_OPT):
cc_list.append(['cluster_' + str(i)for i in range(nc_list[j])])
gm_list = []
acc = np.zeros(( N_CLUST_OPT, N_RAND_INIT))
res_list = []
ctc_list = []
for j in range(N_CLUST_OPT):
gm_list.append([])
res_list.append([])
ctc_list.append([])
nc = nc_list[j]
cl = int(0.5*nc)
for k in range(N_RAND_INIT):
ps_list = np.concatenate([ps]*cl, axis=0)
th_step = 100/(cl+1)
th_p = np.arange(th_step,99,th_step)+ 0.5*(np.random.rand(cl)- 0.5)*th_step
th = np.percentile(ms,th_p)
ms_list = []
for t in range(cl):
ms_new = ms.copy()
ms_new[ms>=th[t]]=1
ms_new[ms<th[t]]=-1
ms_list.append(ms_new)
ms_list = np.concatenate(ms_list, axis=0)
perm = np.random.permutation(nc)
ps_list = ps_list[perm]
ms_list = ms_list[perm]
gm = GaussianMixture2(n_components=nc, init_params='random', covariance_type='full', tol=0.0001,reg_covar=0.001,
max_iter=5000, n_init=1, means_init=ms_list, precisions_init=ps_list, random_state=1234)
gm.fit(np.concatenate([train3,test3],axis = 0))
res = pd.concat([pd.DataFrame(gm.predict_proba(train3), columns = cc_list[j]),
yf.to_frame().reset_index(drop=True)], sort=False, axis=1)
cluster_to_class = res.groupby('target' ).agg('mean' ).values.argmax(0)
cluster_cnt[i,j,k] = cluster_to_class.sum()
res = pd.concat([pd.DataFrame(gm.predict_proba(train3), columns = cc_list[j]),
pd.DataFrame(cluster_to_class, index=cc_list[j],
columns=['target'] ).transpose() ], sort=False, axis=0 ).\
transpose().groupby('target' ).agg(sum ).transpose()
res_list[j].append(res[1])
gm_list[j].append(gm)
ctc_list[j].append(cluster_to_class)
acc[j,k] =(res.values.argmax(1)== yf.values ).mean()
all_acc[i,j,k] = acc[j,k]
all_roc[i,j,k] = roc_auc_score(yf.values, res[1])
best_j = acc.mean(1 ).argmax()
j_selection[best_j] += 1
for k in np.argsort(acc[best_j,:])[-N_TEST:]:
res2 = pd.concat([pd.DataFrame(gm_list[best_j][k].predict_proba(test3), columns = cc_list[best_j]),
pd.DataFrame(ctc_list[best_j][k], index=cc_list[best_j],
columns=['target'] ).transpose() ], sort=False, axis=0 ).\
transpose().groupby('target' ).agg(sum ).transpose()
oof[idx1] += res_list[best_j][k]/N_TEST
preds[idx2] += res2[1]/N_TEST
if i%10==0: print('QMM scores CV =',round(roc_auc_score(train['target'],oof),5))
auc = roc_auc_score(train['target'],oof)
print(j_selection)
print('Final QMM scores CV =',round(auc,5))
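The least obvious step in the loop above is the collapse from per-cluster posteriors to a two-class probability: each cluster column is assigned to the class whose training rows give it the highest mean responsibility (cluster_to_class), and the posteriors of all clusters assigned to class 1 are summed. A minimal sketch with hypothetical numbers (not competition data):

import numpy as np
import pandas as pd

proba = pd.DataFrame({'cluster_0': [0.70, 0.10, 0.60],
                      'cluster_1': [0.20, 0.10, 0.30],
                      'cluster_2': [0.05, 0.50, 0.05],
                      'cluster_3': [0.05, 0.30, 0.05]})   # toy predict_proba output
y = pd.Series([0, 1, 0], name='target')                   # toy labels

# the class that "owns" each cluster: argmax over the per-class mean responsibility
cluster_to_class = pd.concat([proba, y], axis=1).groupby('target').mean().values.argmax(0)
print(cluster_to_class)                                   # -> [0 0 1 1]

# probability of class 1 = summed posteriors of the clusters mapped to class 1
p_class1 = proba.values @ (cluster_to_class == 1).astype(float)
print(p_class1)                                           # -> [0.1 0.8 0.1]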
|
Instant Gratification
|
4,275,635 |
pred = model.predict_classes(x_test,verbose=1 )<load_from_csv>
|
for j in range(N_CLUST_OPT):
print(np.all(cluster_cnt[:,j,:] == 0.5*nc_list[j]))
|
Instant Gratification
|
4,275,635 |
<save_to_csv><EOS>
|
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = preds
sub.to_csv('submission.csv',index=False)
plt.hist(preds,bins=100)
plt.title('Final Test.csv predictions')
plt.show()
|
Instant Gratification
|
3,125,588 |
def build_model(transformer, max_len=512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
sequence_output = transformer(input_word_ids)[0]
cls_token = sequence_output[:, 0, :]
out = Dense(1, activation='sigmoid' )(cls_token)
model = Model(inputs=input_word_ids, outputs=out)
model.compile(Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
return model<load_from_csv>
|
from sklearn.metrics import log_loss
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv" )<load_pretrained>
|
teams = pd.read_csv('../input/wdatafiles/WTeams.csv')
teams2 = pd.read_csv('../input/wdatafiles/WTeamSpellings.csv', encoding='latin-1')
season_cresults = pd.read_csv('../input/wdatafiles/WRegularSeasonCompactResults.csv')
season_dresults = pd.read_csv('../input/wdatafiles/WRegularSeasonDetailedResults.csv')
tourney_cresults = pd.read_csv('../input/wdatafiles/WNCAATourneyCompactResults.csv')
tourney_dresults = pd.read_csv('../input/wdatafiles/WNCAATourneyDetailedResults.csv')
slots = pd.read_csv('../input/wdatafiles/WNCAATourneySlots.csv')
seeds = pd.read_csv('../input/wdatafiles/WNCAATourneySeeds.csv')
seeds = {'_'.join(map(str,[int(k1),k2])) :int(v[1:3])for k1, v, k2 in seeds[['Season', 'Seed', 'TeamID']].values}
seeds = {**seeds, **{k.replace('2018_','2019_'):seeds[k] for k in seeds if '2018_' in k}}
cities = pd.read_csv('../input/wdatafiles/WCities.csv')
gcities = pd.read_csv('../input/wdatafiles/WGameCities.csv')
seasons = pd.read_csv('../input/wdatafiles/WSeasons.csv')
sub = pd.read_csv('../input/WSampleSubmissionStage1.csv')
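The dictionary comprehension above keys every seed by '<Season>_<TeamID>' with the numeric part parsed out of the seed string (v[1:3]), and the next line copies every 2018 entry to a 2019 key so look-ups for the new season still resolve. A minimal sketch with hypothetical team IDs and seeds:

# hypothetical entries only - the IDs and seeds below are made up for illustration
toy_seeds = {'2018_3124': 1, '2018_3199': 7}
# reuse the 2018 seeds under 2019 keys, same trick as the line above
toy_seeds = {**toy_seeds, **{k.replace('2018_', '2019_'): v
                             for k, v in toy_seeds.items() if k.startswith('2018_')}}
print(toy_seeds.get('2019_3124', 0))   # -> 1 (unknown pairs fall back to 0, like the later .fillna(0))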
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
%%time
transformer_layer = transformers.TFDistilBertModel.from_pretrained('distilbert-base-uncased')
tokenizer = transformers.DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )<categorify>
|
teams2 = teams2.groupby(by='TeamID', as_index=False)['TeamNameSpelling'].count()
teams2.columns = ['TeamID', 'TeamNameCount']
teams = pd.merge(teams, teams2, how='left', on=['TeamID'])
del teams2
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
train_input = bert_encode(train.text.values, tokenizer, max_len=160)
test_input = bert_encode(test.text.values, tokenizer, max_len=160)
train_labels = train.target.values<train_model>
|
season_cresults['ST'] = 'S'
season_dresults['ST'] = 'S'
tourney_cresults['ST'] = 'T'
tourney_dresults['ST'] = 'T'
games = pd.concat(( season_dresults, tourney_dresults), axis=0, ignore_index=True)
games.reset_index(drop=True, inplace=True)
games['WLoc'] = games['WLoc'].map({'A': 1, 'H': 2, 'N': 3} )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
train_history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=4,
batch_size=32
)<save_to_csv>
|
games['ID'] = games.apply(lambda r: '_'.join(map(str, [r['Season']]+sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['IDTeams'] = games.apply(lambda r: '_'.join(map(str, sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['Team1'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[0], axis=1)
games['Team2'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[1], axis=1)
games['IDTeam1'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
games['IDTeam2'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
test_pred = model.predict(test_input, verbose=1)
submission['target'] = test_pred.round().astype(int)
submission.to_csv('submission.csv', index=False )<import_modules>
|
games['Team1Seed'] = games['IDTeam1'].map(seeds ).fillna(0)
games['Team2Seed'] = games['IDTeam2'].map(seeds ).fillna(0 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
import gc
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers, optimizers
from tensorflow.keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split<load_from_csv>
|
games['ScoreDiff'] = games['WScore'] - games['LScore']
games['Pred'] = games.apply(lambda r: 1.0 if sorted([r['WTeamID'],r['LTeamID']])[0]==r['WTeamID'] else 0.0, axis=1)
games['ScoreDiffNorm'] = games.apply(lambda r: r['ScoreDiff'] * -1 if r['Pred'] == 0.0 else r['ScoreDiff'], axis=1)
games['SeedDiff'] = games['Team1Seed'] - games['Team2Seed']
games = games.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
X = pd.read_csv('../input/train.csv')
X_test = pd.read_csv('../input/test.csv')
Y = X[['label']]
X = X.drop(["label"], axis=1)
X_train = X.values.reshape(X.shape[0], 28, 28, 1)
Y_train = tf.keras.utils.to_categorical(Y.values, 10)
X_test = X_test.values.reshape(X_test.shape[0], 28, 28, 1 )<split>
|
c_score_col = ['NumOT', 'WFGM', 'WFGA', 'WFGM3', 'WFGA3', 'WFTM', 'WFTA', 'WOR', 'WDR', 'WAst', 'WTO', 'WStl',
'WBlk', 'WPF', 'LFGM', 'LFGA', 'LFGM3', 'LFGA3', 'LFTM', 'LFTA', 'LOR', 'LDR', 'LAst', 'LTO', 'LStl',
'LBlk', 'LPF']
c_score_agg = ['sum', 'mean', 'median', 'max', 'min', 'std', 'skew', 'nunique']
gb = games.groupby(by=['IDTeams'] ).agg({k: c_score_agg for k in c_score_col} ).reset_index()
gb.columns = [''.join(c)+ '_c_score' for c in gb.columns]
games = games[games['ST']=='T']
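The dict-of-lists aggregation above produces a two-level column MultiIndex; ''.join(c) then flattens each (column, aggregation) pair into a single name such as 'WFGMsum_c_score', which is what the later merges key on. A minimal sketch on a toy frame (hypothetical values):

import pandas as pd

toy = pd.DataFrame({'IDTeams': ['a_b', 'a_b', 'c_d'],
                    'WFGM': [20, 25, 30]})
gb_toy = toy.groupby('IDTeams').agg({'WFGM': ['sum', 'mean']}).reset_index()
print(gb_toy.columns.tolist())   # -> [('IDTeams', ''), ('WFGM', 'sum'), ('WFGM', 'mean')]
gb_toy.columns = [''.join(c) + '_c_score' for c in gb_toy.columns]
print(gb_toy.columns.tolist())   # -> ['IDTeams_c_score', 'WFGMsum_c_score', 'WFGMmean_c_score']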
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=42)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_val = X_val.astype('float32')
X_train /= 255
X_test /= 255
X_val /= 255<train_model>
|
sub['WLoc'] = 3
sub['Season'] = sub['ID'].map(lambda x: x.split('_')[0])
sub['Season'] = sub['Season'].astype(int)
sub['Team1'] = sub['ID'].map(lambda x: x.split('_')[1])
sub['Team2'] = sub['ID'].map(lambda x: x.split('_')[2])
sub['IDTeams'] = sub.apply(lambda r: '_'.join(map(str, [r['Team1'], r['Team2']])) , axis=1)
sub['IDTeam1'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
sub['IDTeam2'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1)
sub['Team1Seed'] = sub['IDTeam1'].map(seeds ).fillna(0)
sub['Team2Seed'] = sub['IDTeam2'].map(seeds ).fillna(0)
sub['SeedDiff'] = sub['Team1Seed'] - sub['Team2Seed']
sub = sub.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.10,
width_shift_range=0.1,
height_shift_range=0.1)
datagen.fit(X_train )<choose_model_class>
|
games = pd.merge(games, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score')
sub = pd.merge(sub, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score' )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
model = tf.keras.Sequential()
model.add(layers.Conv2D(32, kernel_size=(5, 5),
activation='relu',
padding='same',
input_shape=(28, 28, 1)))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(32,(5, 5), activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(64,(3, 3), activation='relu', padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(64,(3, 3), activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(128,(3, 3), activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.3))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.3))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.3))
model.add(layers.Dense(10, activation='softmax'))
model.summary()<choose_model_class>
|
col = [c for c in games.columns if c not in ['ID', 'DayNum', 'ST', 'Team1', 'Team2', 'IDTeams', 'IDTeam1', 'IDTeam2', 'WTeamID', 'WScore', 'LTeamID', 'LScore', 'NumOT', 'Pred', 'ScoreDiff', 'ScoreDiffNorm', 'WLoc'] + c_score_col]
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
model.compile(loss="categorical_crossentropy",
optimizer=optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_acc',
factor=0.5,
patience=3,
min_lr=0.00001,
verbose=1 )<train_model>
|
forest = RandomForestRegressor(n_estimators=100, bootstrap=True, n_jobs=-1)
forest.fit(games[col].fillna(-1), games['Pred'] )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
model.fit(datagen.flow(X_train, Y_train, batch_size=128),
epochs=30,
validation_data=(X_val, Y_val),
verbose=1,
steps_per_epoch=X_train.shape[0] // 128,
callbacks=[reduce_lr] )<save_to_csv>
|
predictions = forest.predict(games[col].fillna(-1)).clip(0,1)
print('Log Loss:', log_loss(games['Pred'], predictions))
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,125,588 |
<set_options><EOS>
|
sub['Pred'] = forest.predict(sub[col].fillna(-1)).clip(0,1)
sub[['ID', 'Pred']].to_csv('final.csv', index=False)
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,102,219 |
<SOS> metric: LogLoss Kaggle data source: womens-machine-learning-competition-2019<categorify>
|
df1 = pd.read_csv('../input/myncaa/W-0.1-hamid.csv')
df2 = pd.read_csv('../input/myncaa/W-0.12-duketemon.csv')
df1.head()
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,102,219 |
def label_encode(df, column_name):
    ordered_column = np.sort(df[column_name].unique())
    df[column_name] = df[column_name].map(dict(zip(ordered_column, range(len(ordered_column)))))
    return df
def compare(df,column_name, with_table=False, with_graph=True, compare_to='Survived'):
if with_table:
print(df[df[compare_to] < 3].groupby([compare_to,column_name] ).size().sort_index())
if with_graph:
g = sns.FacetGrid(df, col=compare_to ).map(sns.distplot, column_name)
def show_correlation(df, column_name='Survived'):
return df.corr() [column_name].apply(abs ).sort_values(na_position='first' ).reset_index()
def get_IQR(df, column_name):
Q3 = df[column_name].quantile(0.75)
Q1 = df[column_name].quantile(0.25)
IQR = Q3 - Q1
return Q1, Q3, IQR
def detect_outliers(df, n, features):
outlier_indices = []
for col in features:
Q1, Q3, IQR = get_IQR(df, col)
outlier_step = 1.5 * IQR
outlier_list_col = df[(df[col] < Q1 - outlier_step)|(df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outlier_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list(k for k, v in outlier_indices.items() if v > n)
return multiple_outliers<load_from_csv>
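detect_outliers flags a row only when it falls outside the 1.5*IQR fences in more than n of the listed features, so single-feature flukes are not dropped. A minimal usage sketch with toy values (relying on the helpers defined above; Counter must be importable for detect_outliers):

from collections import Counter   # used inside detect_outliers
import pandas as pd

toy = pd.DataFrame({'Age':  [22, 25, 24, 23, 95],
                    'Fare': [7.3, 8.1, 7.9, 8.0, 512.0]})
# row 4 is extreme in both columns, so it exceeds n=1 offending features
print(detect_outliers(toy, n=1, features=['Age', 'Fare']))   # -> [4]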
|
df1['Pred'] = 0.94*df1['Pred'] + 0.06*df2['Pred']
df1.head()
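The final prediction is a fixed 0.94/0.06 blend of two earlier submission files. A small generalization, as a sketch only (hypothetical file names; assumes the files share the same row order and a 'Pred' column, as the code above does):

import pandas as pd

def blend_submissions(paths, weights, pred_col='Pred'):
    weights = [w / sum(weights) for w in weights]   # normalize the weights to sum to 1
    blended = pd.read_csv(paths[0])
    blended[pred_col] = weights[0] * blended[pred_col]
    for path, w in zip(paths[1:], weights[1:]):
        blended[pred_col] += w * pd.read_csv(path)[pred_col]
    return blended

# blended = blend_submissions(['sub_a.csv', 'sub_b.csv'], weights=[0.94, 0.06])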
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,102,219 |
<feature_engineering><EOS>
|
df1.to_csv('sub.csv',index=False )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
<SOS> metric: LogLoss Kaggle data source: womens-machine-learning-competition-2019<feature_engineering>
|
%matplotlib inline
InteractiveShell.ast_node_interactivity = "all"
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['Surname'] = all_set['Name'].apply(lambda x: x.split(',')[0].strip() )<feature_engineering>
|
data_dir = '../input/stage2wdatafiles/'
df_seed = pd.read_csv(data_dir + 'WNCAATourneySeeds.csv')
df_result = pd.read_csv(data_dir + 'WNCAATourneyCompactResults.csv')
df_seed.tail(3)
df_result.tail(3 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['FamilySurvival'] = 0.5
for surname in all_set['Surname'].unique() :
df = all_set[all_set['Surname'] == surname]
if df.shape[0] > 1:
smin = df['Survived'].min()
smax = df['Survived'].max()
for idx, row in df.iterrows() :
passengerid = row['PassengerId']
if smax == 1.0:
all_set.loc[all_set['PassengerId'] == passengerid, 'FamilySurvival'] = 1.0
elif smin == 0.0:
all_set.loc[all_set['PassengerId'] == passengerid, 'FamilySurvival'] = 0.0<count_values>
|
def seed_to_int(seed):
s_int = int(seed[1:3])
return s_int
def clean_df(df_seed, df_result):
df_seed['seed_int'] = df_seed['Seed'].apply(seed_to_int)
df_seed.drop(['Seed'], axis=1, inplace=True)
df_result.drop(['DayNum', 'WLoc', 'NumOT'], axis=1, inplace=True)
return df_seed, df_result
df_seed, df_result = clean_df(df_seed, df_result)
df_seed.head(3)
df_result.head(3 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['Embarked'].value_counts()<feature_engineering>
|
def merge_seed_result(df_seed, df_result):
df_win_seed = df_seed.rename(columns={'TeamID':'WTeamID', 'seed_int':'WSeed'})
df_loss_seed = df_seed.rename(columns={'TeamID':'LTeamID', 'seed_int':'LSeed'})
df_result = df_result.merge(df_win_seed, how='left', on=['Season', 'WTeamID'])
df_result = df_result.merge(df_loss_seed, how='left', on=['Season', 'LTeamID'])
df_result['SeedDiff'] = np.abs(df_result['WSeed'] - df_result['LSeed'])
df_result['ScoreDiff'] = np.abs(df_result['WScore'] - df_result['LScore'])
return df_result
df_result = merge_seed_result(df_seed, df_result)
df_result.head(3 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['Embarked'] = all_set['Embarked'].fillna('S' )<count_missing_values>
|
df_result = df_result[df_result['ScoreDiff']>3]
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set[all_set['Fare'].isna() ]<data_type_conversions>
|
df_result['upset'] = [1 if ws > ls else 0 for ws, ls, in zip(df_result["WSeed"], df_result["LSeed"])]
print("upset probability")
df_result['upset'].value_counts() / len(df_result)* 100
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['Fare'].fillna(all_set[all_set['Embarked'] == 'S']['Fare'].mean() , inplace=True )<feature_engineering>
|
this_season=2019
total_season=10
train = df_result[(df_result["Season"]>=(this_season - total_season)) &(df_result["Season"]<(this_season-1)) ]
print(train.shape )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['Deck'] = all_set['Cabin'].apply(lambda x: x[0] if type(x)== str else '' )<filter>
|
df_result["Seed_combi"]=[str(ws)+'_'+str(ls)if ws<ls else str(ls)+'_'+str(ws)for ws, ls in zip(df_result["WSeed"], df_result["LSeed"])]
df_result.head(3 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|