kernel_id (int64, 24.2k-23.3M) | prompt (string, lengths 8-1.85M) | completion (string, lengths 1-182k) | comp_name (string, lengths 5-57) |
---|---|---|---|
3,525,312 |
all_set[all_set['Deck'] == 'G']<feature_engineering>
|
df_result_aggs = pd.DataFrame()
df_result_filter_aggs = pd.DataFrame()
df_result_season = df_result[(df_result["Season"]>=(this_season - total_season)) &(df_result["Season"]<(this_season-1)) ]
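# for each seed difference 0-15: mean upset rate and game count, overall (by SeedDiff) and per exact seed pairing (Seed_combi); equal seeds are forced to 0.5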
for value in range(16):
df_result_agg = df_result_season[df_result_season["SeedDiff"]==value].groupby("SeedDiff" ).agg({"upset": ["mean", "count"]})
df_result_agg.columns = [col[0]+"_"+col[1]+"_"+"all" for col in df_result_agg.columns]
df_result_filter_agg = df_result_season[df_result_season["SeedDiff"]==value].groupby("Seed_combi" ).agg({"upset": ["mean", "count"]})
df_result_filter_agg.columns = [col[0]+"_"+col[1] for col in df_result_filter_agg.columns]
if value==0:
df_result_agg["upset_mean_all"] = 0.5
df_result_filter_agg["upset_mean"] = 0.5
df_result_aggs = pd.concat([df_result_aggs, df_result_agg])
df_result_filter_aggs = pd.concat([df_result_filter_aggs, df_result_filter_agg])
df_result_aggs
df_result_filter_aggs.tail(10 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
deck_fare = {}
for deck in all_set['Deck'].unique() :
if len(deck)== 1 and deck in 'ABCDEF':
deck_fare[deck] = all_set[
(all_set['Cabin'].apply(lambda x: True if type(x)== str else False)) &
(all_set['Deck'] == deck)
]['Fare'].mean()
deck_fare<feature_engineering>
|
df_result = df_result.join(df_result_aggs, how='left', on="SeedDiff" ).join(df_result_filter_aggs, how='left', on='Seed_combi')
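# use the seed-pairing upset rate only when it is backed by more than 20 past games, otherwise fall back to the overall rate for that seed difference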
df_result["upset_prob"] = [m if c > 20 else a for a, m, c in zip(df_result["upset_mean_all"], df_result["upset_mean"], df_result["upset_count"])]
df_result.tail()
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
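# impute a missing Deck by picking the deck (A-F) whose average fare is closest to the passenger's fare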
def find_deck(fare):
dist = 1000
res = 'F'
for key in deck_fare.keys() :
new_dist = np.abs(fare - deck_fare[key])
if new_dist < dist:
dist = new_dist
res = key
return res
all_set.loc[all_set['Cabin'].isna() , 'Deck'] = all_set['Fare'].apply(find_deck )<feature_engineering>
|
valid = df_result[(df_result["Season"]==(this_season-1)) ]
log_loss(valid['upset'], valid['upset_prob'] )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['Family'] = 1 + all_set['SibSp'] + all_set['Parch']
all_set['Alone'] = all_set['Family'].apply(lambda x: 1 if x == 1 else 0 )<feature_engineering>
|
valid = df_result[(df_result["Season"]==(this_season-1)) ]
valid = valid.join(df_result_aggs.drop("upset_count_all", axis=1), how='left', on='SeedDiff')
valid.fillna(0, inplace=True)
log_loss(valid['upset'], valid['upset_prob_manually'] )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
age_by_title = {}
for title in all_set['Title'].unique() :
age_by_title[title] = all_set[
(all_set['Age'].apply(lambda x: True if type(x)== float else False)) &
(all_set['Title'] == title)
]['Age'].mean()
age_by_title<feature_engineering>
|
log_loss(valid['upset'], np.clip(valid['upset_prob_manually'], 0.05, 0.95))
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set.loc[all_set['Age'].isna() , 'Age'] = all_set['Title'].apply(lambda x: age_by_title[x] )<categorify>
|
df_seed_2019 = df_seed[df_seed["Season"]==2019]
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['AgeBin'] = pd.qcut(all_set['Age'], 5)
all_set['AgeCode'] = all_set['AgeBin']
all_set = label_encode(all_set, 'AgeCode' )<feature_engineering>
|
this_season=2019
total_season=10
train = df_result[(df_result["Season"]>=(this_season - total_season)) ]
print(train.shape)
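# canonical seed-pairing key, lower seed first (e.g. '3_14')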
df_result["Seed_combi"]=[str(ws)+'_'+str(ls)if ws<ls else str(ls)+'_'+str(ws)for ws, ls in zip(df_result["WSeed"], df_result["LSeed"])]
df_result.head()
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['AgeDist'] = 1/ np.exp(np.abs(all_set['Age'] - all_set[all_set['Survived'] == 1]['Age'].median()))<categorify>
|
df_result_aggs = pd.DataFrame()
df_result_filter_aggs = pd.DataFrame()
for value in range(16):
df_result_agg = df_result[df_result["SeedDiff"]==value].groupby("SeedDiff" ).agg({"upset": ["mean", "count"]})
df_result_agg.columns = [col[0]+"_"+col[1]+"_"+"all" for col in df_result_agg.columns]
df_result_filter_agg = df_result[df_result["SeedDiff"]==value].groupby("Seed_combi" ).agg({"upset": ["mean", "count"]})
df_result_filter_agg.columns = [col[0]+"_"+col[1] for col in df_result_filter_agg.columns]
if value==0:
df_result_agg["upset_mean_all"] = 0.5
df_result_filter_agg["upset_mean"] = 0.5
df_result_aggs = pd.concat([df_result_aggs, df_result_agg])
df_result_filter_aggs = pd.concat([df_result_filter_aggs, df_result_filter_agg])
df_result_aggs
df_result_filter_aggs.tail(10 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set = label_encode(all_set, 'Sex')
compare(all_set, 'Sex' )<categorify>
|
df_result_aggs.loc[0, 'upset_mean_all'] = 0.5
df_result_aggs.loc[10, 'upset_mean_all'] =(0.0 + df_result_aggs.loc[11, 'upset_mean_all'])/ 2
df_result_aggs.loc[11, 'upset_mean_all'] =(0.0 + df_result_aggs.loc[15, 'upset_mean_all'])/ 2
df_result_aggs.loc[12, 'upset_mean_all'] =(0.0 + df_result_aggs.loc[15, 'upset_mean_all'])/ 2
df_result_aggs.loc[13, 'upset_mean_all'] =(0.0 + df_result_aggs.loc[15, 'upset_mean_all'])/ 2
df_result_aggs.loc[14, 'upset_mean_all'] =(0.0 + df_result_aggs.loc[15, 'upset_mean_all'])/ 2
df_result_aggs = df_result_aggs.fillna(-1)
sns.barplot(df_result_aggs.index, df_result_aggs.upset_mean_all)
plt.title('probability of upset based on past result aggregation')
plt.show()
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set = label_encode(all_set, 'Embarked')
compare(all_set, 'Embarked' )<categorify>
|
test = pd.read_csv(".. /input/WSampleSubmissionStage2.csv")
test = pd.DataFrame(np.array([ID.split("_")for ID in test["ID"]]), columns=["Season", "TeamA", "TeamB"], dtype=int)
test.head(3)
test = test.merge(df_seed_2019, how='left', left_on=["Season", "TeamA"], right_on=["Season", "TeamID"])
test = test.rename(columns={"seed_int": "TeamA_seed"} ).drop("TeamID", axis=1)
test = test.merge(df_seed_2019, how='left', left_on=["Season", "TeamB"], right_on=["Season", "TeamID"])
test = test.rename(columns={"seed_int": "TeamB_seed"} ).drop("TeamID", axis=1)
test['SeedDiff'] = np.abs(test.TeamA_seed - test.TeamB_seed)
test.head(3 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set = label_encode(all_set, 'Deck')
compare(all_set, 'Deck' )<count_values>
|
test["Seed_combi"]=[str(a)+'_'+str(b)if a<b else str(b)+'_'+str(a)for a, b in zip(test["TeamA_seed"], test["TeamB_seed"])]
test.head()
test = test.join(df_result_aggs, how='left', on="SeedDiff" ).join(df_result_filter_aggs, how='left', on='Seed_combi' ).fillna(-1)
test["upset_prob"] = [m if c > 20 else a for a, m, c in zip(test["upset_mean_all"], test["upset_mean"], test["upset_count"])]
test["win_prob"] = [(1-upset_prob)if teamA<teamB else upset_prob if teamA>teamB else 0.5
for teamA, teamB, upset_prob in zip(test['TeamA_seed'], test['TeamB_seed'], test['upset_prob'])]
test.tail()
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
all_set['Title'].value_counts()<count_unique_values>
|
submit = pd.read_csv(".. /input/WSampleSubmissionStage2.csv")
submit["Pred"] = test['win_prob']
submit.to_csv("submission_agg_all_manually_noclip.csv", index=False)
submit.head()
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,525,312 |
<categorify><EOS>
|
clipped_sub = np.clip(test['win_prob'], 0.05, 0.95)
submit = pd.read_csv(".. /input/WSampleSubmissionStage2.csv")
submit["Pred"] = clipped_sub
submit.to_csv("submission_agg_all_manually_cliped.csv", index=False)
submit.head()
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
<SOS> metric: LogLoss Kaggle data source: womens-machine-learning-competition-2019<categorify>
|
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import ExtraTreesClassifier
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
all_set = label_encode(all_set, 'Title')
compare(all_set, 'Title' )<feature_engineering>
|
teams = pd.read_csv('../input/wdatafiles/WTeams.csv')
teams2 = pd.read_csv('../input/wdatafiles/WTeamSpellings.csv', encoding='latin-1')
season_cresults = pd.read_csv('../input/wdatafiles/WRegularSeasonCompactResults.csv')
season_dresults = pd.read_csv('../input/wdatafiles/WRegularSeasonDetailedResults.csv')
tourney_cresults = pd.read_csv('../input/wdatafiles/WNCAATourneyCompactResults.csv')
tourney_dresults = pd.read_csv('../input/wdatafiles/WNCAATourneyDetailedResults.csv')
slots = pd.read_csv('../input/wdatafiles/WNCAATourneySlots.csv')
seeds = pd.read_csv('../input/wdatafiles/WNCAATourneySeeds.csv')
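# seeds: map '{Season}_{TeamID}' -> integer seed; 2018 entries are copied to 2019 keys as a stand-in for the new bracket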
seeds = {'_'.join(map(str, [int(k1), k2])): int(v[1:3]) for k1, v, k2 in seeds[['Season', 'Seed', 'TeamID']].values}
seeds = {**seeds, **{k.replace('2018_', '2019_'): seeds[k] for k in seeds if '2018_' in k}}
cities = pd.read_csv('../input/wdatafiles/WCities.csv')
gcities = pd.read_csv('../input/wdatafiles/WGameCities.csv')
seasons = pd.read_csv('../input/wdatafiles/WSeasons.csv')
sub = pd.read_csv('../input/WSampleSubmissionStage2.csv')
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
all_set['FarePerFamilyMember'] = all_set['Fare'] / all_set['Family']
all_set['FareBin'] = pd.qcut(all_set['Fare'], 5)
all_set['FareCode'] = all_set['FareBin']
all_set = label_encode(all_set, 'FareCode')
all_set['Fare'] = all_set['Fare'].apply(lambda x: np.log(x)if x > 0 else np.log(3.0))<drop_column>
|
teams2 = teams2.groupby(by='TeamID', as_index=False)['TeamNameSpelling'].count()
teams2.columns = ['TeamID', 'TeamNameCount']
teams = pd.merge(teams, teams2, how='left', on=['TeamID'])
del teams2
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
all_tmp = all_set.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin', 'AgeBin', 'AgeCode', 'AgeDist', 'FareBin', 'FareCode'] )<categorify>
|
season_cresults['ST'] = 'S'
season_dresults['ST'] = 'S'
tourney_cresults['ST'] = 'T'
tourney_dresults['ST'] = 'T'
games = pd.concat(( season_dresults, tourney_dresults), axis=0, ignore_index=True)
games.reset_index(drop=True, inplace=True)
games['WLoc'] = games['WLoc'].map({'A': 1, 'H': 2, 'N': 3} )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
all_dum = pd.get_dummies(all_tmp, drop_first=True)
print(all_dum.shape)
train_set = all_dum[all_dum['Survived'] < 3].copy()
test_set = all_dum[all_dum['Survived'] == 3].copy()<prepare_x_and_y>
|
games['ID'] = games.apply(lambda r: '_'.join(map(str, [r['Season']]+sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['IDTeams'] = games.apply(lambda r: '_'.join(map(str, sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['Team1'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[0], axis=1)
games['Team2'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[1], axis=1)
games['IDTeam1'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
games['IDTeam2'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
X = train_set.copy()
try:
X.drop(columns=['Survived'], inplace=True)
except:
pass
y = train_set['Survived'].copy()
scaler = StandardScaler().fit(X)
scl_X = scaler.transform(X)
tX = test_set.copy()
try:
tX.drop(columns=['Survived'], inplace=True)
except:
pass
scl_tX = scaler.transform(tX )<split>
|
games['Team1Seed'] = games['IDTeam1'].map(seeds ).fillna(0)
games['Team2Seed'] = games['IDTeam2'].map(seeds ).fillna(0 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
train_X, val_X, train_y, val_y = train_test_split(scl_X, y, test_size=0.33, random_state=42, stratify=y )<train_model>
|
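# Pred is 1 when the lower-ID team (Team1) won; ScoreDiffNorm is the score margin from Team1's perspective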
games['ScoreDiff'] = games['WScore'] - games['LScore']
games['Pred'] = games.apply(lambda r: 1. if sorted([r['WTeamID'], r['LTeamID']])[0] == r['WTeamID'] else 0., axis=1)
games['ScoreDiffNorm'] = games.apply(lambda r: r['ScoreDiff'] * -1 if r['Pred'] == 0. else r['ScoreDiff'], axis=1)
games['SeedDiff'] = games['Team1Seed'] - games['Team2Seed']
games = games.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(train_X, train_y)
score = rfc.score(val_X, val_y)
print('RandomForestClassifier =', score)
pred_y = rfc.predict(val_X)
target_names = ['DEAD', 'SURVIVED']
print(classification_report(val_y, pred_y,target_names=target_names))<find_best_params>
|
sub['WLoc'] = 3
sub['Season'] = sub['ID'].map(lambda x: x.split('_')[0])
sub['Season'] = sub['Season'].astype(int)
sub['Team1'] = sub['ID'].map(lambda x: x.split('_')[1])
sub['Team2'] = sub['ID'].map(lambda x: x.split('_')[2])
sub['IDTeams'] = sub.apply(lambda r: '_'.join(map(str, [r['Team1'], r['Team2']])) , axis=1)
sub['IDTeam1'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
sub['IDTeam2'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1)
sub['Team1Seed'] = sub['IDTeam1'].map(seeds ).fillna(0)
sub['Team2Seed'] = sub['IDTeam2'].map(seeds ).fillna(0)
sub['SeedDiff'] = sub['Team1Seed'] - sub['Team2Seed']
sub = sub.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
best_model = 0
best_score = score<find_best_params>
|
games = pd.merge(games, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score')
sub = pd.merge(sub, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score' )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
clfs = []
clfs.append(( 'ada', AdaBoostClassifier))
clfs.append(( 'bag', BaggingClassifier))
clfs.append(( 'rnd', RandomForestClassifier))
clfs.append(( 'knn', KNeighborsClassifier))
clfs.append(( 'mlp', MLPClassifier))
clfs.append(( 'ext', ExtraTreesClassifier))
clfs.append(( 'log', LogisticRegression))
clfs.append(( 'gbm', GradientBoostingClassifier))
params = []
params.append({'n_estimators': np.arange(10,500,10), 'learning_rate':[float(x/100.) for x in np.arange(1,10)]})
params.append({'n_estimators': np.arange(10,500,10)})
params.append({'n_estimators': np.arange(10,500,10)})
params.append({'n_neighbors': np.arange(3,15)})
params.append({'hidden_layer_sizes': [(100,),(200,),(300,),(400,),(500,)]})
params.append({'n_estimators': np.arange(10,200,10)})
params.append({'max_iter': np.arange(10,500,10)})
params.append({'n_estimators': np.arange(10,500,10), 'learning_rate':[float(x/100.) for x in np.arange(1,10)], 'max_depth':np.arange(3,10)} )<train_on_grid>
|
col = [c for c in games.columns if c not in ['ID', 'DayNum', 'ST', 'Team1', 'Team2', 'IDTeams', 'IDTeam1', 'IDTeam2', 'WTeamID', 'WScore', 'LTeamID', 'LScore', 'NumOT', 'Pred', 'ScoreDiff', 'ScoreDiffNorm', 'WLoc'] + c_score_col]
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
best_estimators = []
best_params = []
estimators_weights = []
k = len(params)
for idx in range(len(clfs)) :
gs = RandomizedSearchCV(clfs[idx][1]() , params[idx], cv=5)
gs.fit(train_X, train_y)
estimators_weights.append(gs.score(val_X, val_y))
best_estimators.append(gs.best_estimator_)
best_params.append(gs.best_params_)
print(k, clfs[idx][0], gs.best_params_)
k -= 1<define_variables>
|
model = ExtraTreesClassifier(n_estimators=200)
model.fit(games[col].fillna(-1), games['Pred'])
predictions = model.predict(games[col].fillna(-1)).clip(0,1)
print('Log Loss:', log_loss(games['Pred'], predictions))
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
estimator_list = list(zip([name for(name, model)in clfs], best_estimators))<train_model>
|
sub['Pred'] = model.predict(sub[col].fillna(-1)).clip(0,1)
sub[['ID', 'Pred']].to_csv('submission_et.csv', index=False )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,317,893 |
<predict_on_test><EOS>
|
FileLink('./submission_et.csv' )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
<SOS> metric: LogLoss Kaggle data source: womens-machine-learning-competition-2019<train_model>
|
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import AdaBoostClassifier
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
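# level-1 stacking features: class-1 probabilities from each tuned base estimator on the train, validation and test sets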
fit_X = np.zeros(( train_y.shape[0], len(best_estimators)))
fit_X = pd.DataFrame(fit_X)
pred_X = np.zeros(( val_y.shape[0], len(best_estimators)))
pred_X = pd.DataFrame(pred_X)
test_X = np.zeros(( scl_tX.shape[0], len(best_estimators)))
test_X = pd.DataFrame(test_X)
print("Fitting models.")
cols = list()
for i,(name, m)in enumerate(estimator_list):
print("%s..." % name, end=" ", flush=False)
fit_X.iloc[:, i] = m.predict_proba(train_X)[:, 1]
pred_X.iloc[:, i] = m.predict_proba(val_X)[:, 1]
test_X.iloc[:, i] = m.predict_proba(scl_tX)[:, 1]
cols.append(name)
print("done")
fit_X.columns = cols
pred_X.columns = cols
test_X.columns = cols<train_on_grid>
|
teams = pd.read_csv('../input/wdatafiles/WTeams.csv')
teams2 = pd.read_csv('../input/wdatafiles/WTeamSpellings.csv', encoding='latin-1')
season_cresults = pd.read_csv('../input/wdatafiles/WRegularSeasonCompactResults.csv')
season_dresults = pd.read_csv('../input/wdatafiles/WRegularSeasonDetailedResults.csv')
tourney_cresults = pd.read_csv('../input/wdatafiles/WNCAATourneyCompactResults.csv')
tourney_dresults = pd.read_csv('../input/wdatafiles/WNCAATourneyDetailedResults.csv')
slots = pd.read_csv('../input/wdatafiles/WNCAATourneySlots.csv')
seeds = pd.read_csv('../input/wdatafiles/WNCAATourneySeeds.csv')
seeds = {'_'.join(map(str, [int(k1), k2])): int(v[1:3]) for k1, v, k2 in seeds[['Season', 'Seed', 'TeamID']].values}
seeds = {**seeds, **{k.replace('2018_', '2019_'): seeds[k] for k in seeds if '2018_' in k}}
cities = pd.read_csv('../input/wdatafiles/WCities.csv')
gcities = pd.read_csv('../input/wdatafiles/WGameCities.csv')
seasons = pd.read_csv('../input/wdatafiles/WSeasons.csv')
sub = pd.read_csv('../input/WSampleSubmissionStage1.csv')
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
meta_estimator = GradientBoostingClassifier()
meta_params = {'n_estimators': np.arange(10,500,10), 'learning_rate':[float(x/100.) for x in np.arange(1,10)], 'max_depth':np.arange(3,10)}
meta_estimator = RandomizedSearchCV(GradientBoostingClassifier() , meta_params, cv=5)
meta_estimator.fit(fit_X, train_y)
score = meta_estimator.score(pred_X, val_y)
print('MetaEstimator =', score)
if score > best_score:
best_model = 2
best_score = score<predict_on_test>
|
teams2 = teams2.groupby(by='TeamID', as_index=False)['TeamNameSpelling'].count()
teams2.columns = ['TeamID', 'TeamNameCount']
teams = pd.merge(teams, teams2, how='left', on=['TeamID'])
del teams2
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
pred_y = meta_estimator.predict(pred_X)
target_names = ['DEAD', 'SURVIVED']
print(classification_report(val_y, pred_y,target_names=target_names))<train_model>
|
season_cresults['ST'] = 'S'
season_dresults['ST'] = 'S'
tourney_cresults['ST'] = 'T'
tourney_dresults['ST'] = 'T'
games = pd.concat(( season_dresults, tourney_dresults), axis=0, ignore_index=True)
games.reset_index(drop=True, inplace=True)
games['WLoc'] = games['WLoc'].map({'A': 1, 'H': 2, 'N': 3} )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
models = ['RandomForestClassifier', 'VotingClassifier', 'MetaEstimator']
print('Best model = ', models[best_model] )<save_to_csv>
|
games['ID'] = games.apply(lambda r: '_'.join(map(str, [r['Season']]+sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['IDTeams'] = games.apply(lambda r: '_'.join(map(str, sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['Team1'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[0], axis=1)
games['Team2'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[1], axis=1)
games['IDTeam1'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
games['IDTeam2'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
if best_model == 0:
predictions = rfc.predict(scl_tX)
elif best_model == 1:
predictions = vclf.predict(scl_tX)
else:
predictions = meta_estimator.predict(test_X)
PassengerId = test_df['PassengerId'].values
results = pd.DataFrame({ 'PassengerId': PassengerId, 'Survived': predictions })
results.to_csv('results.csv', index=False )<load_from_csv>
|
games['Team1Seed'] = games['IDTeam1'].map(seeds ).fillna(0)
games['Team2Seed'] = games['IDTeam2'].map(seeds ).fillna(0 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
gender_data = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv' )<drop_column>
|
games['ScoreDiff'] = games['WScore'] - games['LScore']
games['Pred'] = games.apply(lambda r: 1. if sorted([r['WTeamID'], r['LTeamID']])[0] == r['WTeamID'] else 0., axis=1)
games['ScoreDiffNorm'] = games.apply(lambda r: r['ScoreDiff'] * -1 if r['Pred'] == 0. else r['ScoreDiff'], axis=1)
games['SeedDiff'] = games['Team1Seed'] - games['Team2Seed']
games = games.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
train_data = train_data.drop(['Ticket', 'Cabin'], axis = 1)
test_data = test_data.drop(['Ticket', 'Cabin'], axis = 1 )<feature_engineering>
|
sub['WLoc'] = 3
sub['Season'] = sub['ID'].map(lambda x: x.split('_')[0])
sub['Season'] = sub['Season'].astype(int)
sub['Team1'] = sub['ID'].map(lambda x: x.split('_')[1])
sub['Team2'] = sub['ID'].map(lambda x: x.split('_')[2])
sub['IDTeams'] = sub.apply(lambda r: '_'.join(map(str, [r['Team1'], r['Team2']])) , axis=1)
sub['IDTeam1'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
sub['IDTeam2'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1)
sub['Team1Seed'] = sub['IDTeam1'].map(seeds ).fillna(0)
sub['Team2Seed'] = sub['IDTeam2'].map(seeds ).fillna(0)
sub['SeedDiff'] = sub['Team1Seed'] - sub['Team2Seed']
sub = sub.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
df = [train_data, test_data]
for row in df:
row['Title'] = row.Name.str.extract('([A-Za-z]+)\.', expand=False)
pd.crosstab(train_data['Title'], train_data['Sex'] )<feature_engineering>
|
games = pd.merge(games, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score')
sub = pd.merge(sub, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score' )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
for row in df:
row['Title'] = row['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
row['Title'] = row['Title'].replace('Mlle', 'Miss')
row['Title'] = row['Title'].replace('Ms', 'Miss')
row['Title'] = row['Title'].replace('Mme', 'Mrs')
train_data[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()<categorify>
|
col = [c for c in games.columns if c not in ['ID', 'DayNum', 'ST', 'Team1', 'Team2', 'IDTeams', 'IDTeam1', 'IDTeam2', 'WTeamID', 'WScore', 'LTeamID', 'LScore', 'NumOT', 'Pred', 'ScoreDiff', 'ScoreDiffNorm', 'WLoc'] + c_score_col]
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for row in df:
row['Title'] = row['Title'].map(title_mapping)
row['Title'] = row['Title'].fillna(0 )<drop_column>
|
model = AdaBoostClassifier(n_estimators=200, learning_rate=1.4)
model.fit(games[col].fillna(-1), games['Pred'])
predictions = model.predict(games[col].fillna(-1)).clip(0,1)
print('Log Loss:', log_loss(games['Pred'], predictions))
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,089,040 |
<define_variables><EOS>
|
sub['Pred'] = model.predict(sub[col].fillna(-1)).clip(0,1)
sub[['ID', 'Pred']].to_csv('submission.csv', index=False )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
<SOS> metric: LogLoss Kaggle data source: womens-machine-learning-competition-2019<categorify>
|
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import AdaBoostClassifier
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
encoder = LabelEncoder()
train_data['Sex'] = encoder.fit_transform(train_data['Sex'])
test_data['Sex'] = encoder.fit_transform(test_data['Sex'] )<define_variables>
|
teams = pd.read_csv('../input/wdatafiles/WTeams.csv')
teams2 = pd.read_csv('../input/wdatafiles/WTeamSpellings.csv', encoding='latin-1')
season_cresults = pd.read_csv('../input/wdatafiles/WRegularSeasonCompactResults.csv')
season_dresults = pd.read_csv('../input/wdatafiles/WRegularSeasonDetailedResults.csv')
tourney_cresults = pd.read_csv('../input/wdatafiles/WNCAATourneyCompactResults.csv')
tourney_dresults = pd.read_csv('../input/wdatafiles/WNCAATourneyDetailedResults.csv')
slots = pd.read_csv('../input/wdatafiles/WNCAATourneySlots.csv')
seeds = pd.read_csv('../input/wdatafiles/WNCAATourneySeeds.csv')
seeds = {'_'.join(map(str, [int(k1), k2])): int(v[1:3]) for k1, v, k2 in seeds[['Season', 'Seed', 'TeamID']].values}
seeds = {**seeds, **{k.replace('2018_', '2019_'): seeds[k] for k in seeds if '2018_' in k}}
cities = pd.read_csv('../input/wdatafiles/WCities.csv')
gcities = pd.read_csv('../input/wdatafiles/WGameCities.csv')
seasons = pd.read_csv('../input/wdatafiles/WSeasons.csv')
sub = pd.read_csv('../input/WSampleSubmissionStage1.csv')
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
guess_ages = np.zeros(( 2,3))
df = [train_data, test_data]
guess_ages
<find_best_params>
|
teams2 = teams2.groupby(by='TeamID', as_index=False)['TeamNameSpelling'].count()
teams2.columns = ['TeamID', 'TeamNameCount']
teams = pd.merge(teams, teams2, how='left', on=['TeamID'])
del teams2
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
for row in df:
for i in range(0, 2):
for j in range(0, 3):
guess_df = row[(row['Sex'] == i)& \
(row['Pclass'] == j+1)]['Age'].dropna()
age_guess = guess_df.median()
guess_ages[i,j] = int(age_guess/0.5 + 0.5)* 0.5
for i in range(0, 2):
for j in range(0, 3):
row.loc[(row.Age.isnull())&(row.Sex == i)&(row.Pclass == j+1),\
'Age'] = guess_ages[i,j]
row['Age'] = row['Age'].astype(int)
train_data.head()<feature_engineering>
|
season_cresults['ST'] = 'S'
season_dresults['ST'] = 'S'
tourney_cresults['ST'] = 'T'
tourney_dresults['ST'] = 'T'
games = pd.concat(( season_dresults, tourney_dresults), axis=0, ignore_index=True)
games.reset_index(drop=True, inplace=True)
games['WLoc'] = games['WLoc'].map({'A': 1, 'H': 2, 'N': 3} )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
train_data['AgeBand'] = pd.cut(train_data['Age'], 5)
train_data.head()<sort_values>
|
games['ID'] = games.apply(lambda r: '_'.join(map(str, [r['Season']]+sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['IDTeams'] = games.apply(lambda r: '_'.join(map(str, sorted([r['WTeamID'],r['LTeamID']]))), axis=1)
games['Team1'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[0], axis=1)
games['Team2'] = games.apply(lambda r: sorted([r['WTeamID'],r['LTeamID']])[1], axis=1)
games['IDTeam1'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
games['IDTeam2'] = games.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
train_data[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False ).mean().sort_values(by='AgeBand', ascending=True )<categorify>
|
games['Team1Seed'] = games['IDTeam1'].map(seeds ).fillna(0)
games['Team2Seed'] = games['IDTeam2'].map(seeds ).fillna(0 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
train_data['AgeBand'] = encoder.fit_transform(train_data['AgeBand'])
train_data.head()<categorify>
|
games['ScoreDiff'] = games['WScore'] - games['LScore']
games['Pred'] = games.apply(lambda r: 1. if sorted([r['WTeamID'], r['LTeamID']])[0] == r['WTeamID'] else 0., axis=1)
games['ScoreDiffNorm'] = games.apply(lambda r: r['ScoreDiff'] * -1 if r['Pred'] == 0. else r['ScoreDiff'], axis=1)
games['SeedDiff'] = games['Team1Seed'] - games['Team2Seed']
games = games.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
test_data['AgeBand'] = pd.cut(test_data['Age'], 5)
test_data['AgeBand'] = encoder.fit_transform(test_data['AgeBand'])
test_data.head()<sort_values>
|
sub['WLoc'] = 3
sub['Season'] = sub['ID'].map(lambda x: x.split('_')[0])
sub['Season'] = sub['Season'].astype(int)
sub['Team1'] = sub['ID'].map(lambda x: x.split('_')[1])
sub['Team2'] = sub['ID'].map(lambda x: x.split('_')[2])
sub['IDTeams'] = sub.apply(lambda r: '_'.join(map(str, [r['Team1'], r['Team2']])) , axis=1)
sub['IDTeam1'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team1']])) , axis=1)
sub['IDTeam2'] = sub.apply(lambda r: '_'.join(map(str, [r['Season'], r['Team2']])) , axis=1)
sub['Team1Seed'] = sub['IDTeam1'].map(seeds ).fillna(0)
sub['Team2Seed'] = sub['IDTeam2'].map(seeds ).fillna(0)
sub['SeedDiff'] = sub['Team1Seed'] - sub['Team2Seed']
sub = sub.fillna(-1 )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
df = [train_data, test_data]
for row in df:
row['FamilySize'] = row['SibSp'] + row['Parch'] + 1
train_data[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False ).mean().sort_values(by='Survived', ascending=False )<feature_engineering>
|
games = pd.merge(games, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score')
sub = pd.merge(sub, gb, how='left', left_on='IDTeams', right_on='IDTeams_c_score' )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
for row in df:
row['IsAlone'] = 0
row.loc[row['FamilySize'] == 1, 'IsAlone'] = 1
train_data[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False ).mean()<drop_column>
|
col = [c for c in games.columns if c not in ['ID', 'DayNum', 'ST', 'Team1', 'Team2', 'IDTeams', 'IDTeam1', 'IDTeam2', 'WTeamID', 'WScore', 'LTeamID', 'LScore', 'NumOT', 'Pred', 'ScoreDiff', 'ScoreDiffNorm', 'WLoc'] + c_score_col]
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
train_data = train_data.drop(['Parch', 'SibSp'], axis = 1)
test_data = test_data.drop(['Parch', 'SibSp'], axis = 1 )<feature_engineering>
|
model = AdaBoostClassifier(n_estimators=200, learning_rate=1.4)
model.fit(games[col].fillna(-1), games['Pred'])
predictions = model.predict(games[col].fillna(-1)).clip(0,1)
print('Log Loss:', log_loss(games['Pred'], predictions))
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
3,231,074 |
<feature_engineering><EOS>
|
sub['Pred'] = model.predict(sub[col].fillna(-1)).clip(0,1)
sub[['ID', 'Pred']].to_csv('submission.csv', index=False )
|
Google Cloud & NCAA® ML Competition 2019-Women's
|
6,654,012 |
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<install_modules>
|
!pip install tensorflow-gpu==1.14.0 --quiet
!pip install keras==2.2.4 --quiet
|
Understanding Clouds from Satellite Images
|
6,654,012 |
!pip install pyspellchecker<set_options>
|
!pip install tta-wrapper --quiet
seed = 0
seed_everything(seed)
warnings.filterwarnings("ignore" )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
np.random.seed(31415)
spell = spc.SpellChecker()<categorify>
|
train = pd.read_csv('../input/understanding_cloud_organization/train.csv')
submission = pd.read_csv('../input/understanding_cloud_organization/sample_submission.csv')
train['image'] = train['Image_Label'].apply(lambda x: x.split('_')[0])
train['label'] = train['Image_Label'].apply(lambda x: x.split('_')[1])
submission['image'] = submission['Image_Label'].apply(lambda x: x.split('_')[0])
test = pd.DataFrame(submission['image'].unique() , columns=['image'])
train_df = pd.pivot_table(train, index=['image'], values=['EncodedPixels'], columns=['label'], aggfunc=np.min ).reset_index()
train_df.columns = ['image', 'Fish_mask', 'Flower_mask', 'Gravel_mask', 'Sugar_mask']
print('Compete set samples:', len(train_df))
print('Test samples:', len(submission))
display(train.head() )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
def add_location_to_text(row):
if row['location'] is not np.nan:
return row['text']+" "+row['location']
return row['text']
def add_keywords_to_text(row):
if row['keyword'] is not np.nan:
return row['text']+" "+row['keyword']
return row['text']
def add_sp1(text):
def preprocess_text_for_spell(words):
words = list(filter(lambda x: len(x)>0, words))
return list(filter(lambda x: not x.startswith("#") and x[0] != x[0].capitalize(), words))
words = preprocess_text_for_spell(text.split(" "))
return len(spell.unknown(words))
def add_wc(text):
return len(text.split(' '))
def number_hash(text):
words = list(filter(lambda x: len(x)>0, text.split(' ')))
return len(list(filter(lambda x: x.startswith('#'), words)))
def number_of_chars(text):
return len(text)
def has_keyword(keyword):
return int(not keyword is np.nan)
def has_location(location):
return int(not location is np.nan)
def apply_all_feature(df):
df.loc[:,'sp1'] = df['text'].apply(add_sp1 ).values
df.loc[:,'wc'] = df['text'].apply(add_wc ).values
df.loc[:,'hst'] = df['text'].apply(number_hash ).values
df.loc[:,'ch'] = df['text'].apply(number_of_chars ).values
df.loc[:,'loc'] = df['location'].apply(has_location ).values
df.loc[:,'text'] = df.apply(add_location_to_text,axis=1)
df.loc[:,'text'] = df.apply(add_keywords_to_text,axis=1)
df.loc[:,'hl'] = df.apply(has_location,axis=1)
df.loc[:,'hk'] = df.apply(has_keyword,axis=1)
def build_features(df_train,df_test,use_extra=True,use_nmf=False):
Y_train = df_train['target'].values
n_train = len(Y_train)
n_test = len(df_test)
df_train = df_train.drop('target',axis=1)
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,stop_words='english',strip_accents='ascii',min_df=0.,ngram_range=(1,2))
df = df_train.append(df_test, ignore_index = True, sort=False)
assert len(df)==n_train+n_test
df.loc[:,'text'] = df['text'].apply(lambda x: x.lower())
X = vectorizer.fit_transform(df['text'])
if use_extra:
apply_all_feature(df)
X_custom = sklearn.preprocessing.scale(df[['sp1','wc','hst','ch','loc']].values,with_mean=False)
X = scipy.sparse.hstack(( X,X_custom)).tocsr()
if use_nmf:
model = NMF(n_components=15, init='random', random_state=0)
X = scipy.sparse.hstack(( X,model.fit_transform(X),model.fit_transform(X.log1p())) ).tocsr()
X_train = X[:n_train]
X_test = X[n_train:,:]
if 'target' in df_test.columns:
Y_test = df_test['target']
return X_train,X_test,Y_train,Y_test
else:
return X_train,X_test,Y_train,None<load_from_csv>
|
X_train, X_val = train_test_split(train_df, test_size=0.2, random_state=seed)
X_train['set'] = 'train'
X_val['set'] = 'validation'
test['set'] = 'test'
print('Train samples: ', len(X_train))
print('Validation samples: ', len(X_val))
|
Understanding Clouds from Satellite Images
|
6,654,012 |
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
df_train,df_test = sklearn.model_selection.train_test_split(train,test_size=0.33)
X_train,X_test,Y_train,Y_test = build_features(df_train,df_test,use_extra=True,use_nmf=True )<choose_model_class>
|
BACKBONE = 'resnet18'
BATCH_SIZE = 16
EPOCHS = 40
LEARNING_RATE = 1e-3
HEIGHT = 384
WIDTH = 480
CHANNELS = 3
N_CLASSES = 4
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.2
model_path = 'uNet_%s_%sx%s.h5' %(BACKBONE, HEIGHT, WIDTH )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
rreg = RidgeClassifier(alpha=.8,solver='sag')
lass = Lasso(alpha=.01)
rf = RandomForestClassifier(max_depth=20, random_state=0, n_estimators=100)
svm = LinearSVC(penalty='l2',max_iter=10000)
svm = SVC(kernel='linear',max_iter=10000 )<predict_on_test>
|
train_generator = DataGenerator(
directory=train_images_dest_path,
dataframe=X_train,
target_df=train,
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
n_channels=CHANNELS,
n_classes=N_CLASSES,
preprocessing=preprocessing,
augmentation=augmentation,
seed=seed)
valid_generator = DataGenerator(
directory=validation_images_dest_path,
dataframe=X_val,
target_df=train,
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
n_channels=CHANNELS,
n_classes=N_CLASSES,
preprocessing=preprocessing,
seed=seed )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
rreg.fit(X_train, Y_train)
Y_pred = rreg.predict(X_test)
print(sklearn.metrics.classification_report(Y_test,Y_pred))<train_model>
|
model = sm.Unet(backbone_name=BACKBONE,
encoder_weights='imagenet',
classes=N_CLASSES,
activation='sigmoid',
input_shape=(HEIGHT, WIDTH, CHANNELS))
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1)
metric_list = [dice_coef, sm.metrics.iou_score]
callback_list = [checkpoint, es, rlrop]
optimizer = RAdam(learning_rate=LEARNING_RATE, warmup_proportion=0.1)
model.compile(optimizer=optimizer, loss=sm.losses.bce_dice_loss, metrics=metric_list)
model.summary()
|
Understanding Clouds from Satellite Images
|
6,654,012 |
lass.fit(X_train, Y_train)
Y_pred = list(map(lambda x: x>=.5, lass.predict(X_test)))
print(sklearn.metrics.classification_report(Y_test,Y_pred))<predict_on_test>
|
STEP_SIZE_TRAIN = len(X_train)//BATCH_SIZE
STEP_SIZE_VALID = len(X_val)//BATCH_SIZE
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
callbacks=callback_list,
epochs=EPOCHS,
verbose=2 ).history
|
Understanding Clouds from Satellite Images
|
6,654,012 |
rf.fit(X_train, Y_train)
Y_pred = rf.predict(X_test)
print(sklearn.metrics.classification_report(Y_test,Y_pred))<predict_on_test>
|
class_names = ['Fish ', 'Flower', 'Gravel', 'Sugar ']
best_tresholds = [.5,.5,.5,.35]
best_masks = [25000, 20000, 22500, 15000]
for index, name in enumerate(class_names):
print('%s threshold=%.2f mask size=%d' %(name, best_tresholds[index], best_masks[index]))
|
Understanding Clouds from Satellite Images
|
6,654,012 |
svm.fit(X_train, Y_train)
Y_pred = svm.predict(X_test)
print(sklearn.metrics.classification_report(Y_test,Y_pred))<prepare_x_and_y>
|
train_metrics = get_metrics(model, train, X_train, train_images_dest_path, best_tresholds, best_masks, seed=seed, preprocessing=preprocessing, set_name='Train')
display(train_metrics)
validation_metrics = get_metrics(model, train, X_val, validation_images_dest_path, best_tresholds, best_masks, seed=seed, preprocessing=preprocessing, set_name='Validation')
display(validation_metrics )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
x_train,x_test,y_train,y_test = build_features(train,
test,
use_extra = True,
use_nmf = True)
kf = KFold(n_splits=5 )<find_best_model_class>
|
model = tta_segmentation(model, h_flip=True, v_flip=True, h_shift=(-10, 10), v_shift=(-10, 10), merge='mean' )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
F = []
A = np.round(np.linspace(0,1,11),2)
for a in tqdm(A):
rreg = RidgeClassifier(alpha=a,solver='sag')
f1 = []
for i,(train_index, test_index)in enumerate(kf.split(x_train)) :
kf_x_train, kf_y_train = x_train[train_index], y_train[train_index]
kf_x_val, kf_y_val = x_train[test_index], y_train[test_index]
rreg.fit(kf_x_train, kf_y_train)
y_pred = rreg.predict(kf_x_val)
res = sklearn.metrics.classification_report(kf_y_val,y_pred, output_dict=True)
f1.append(res['1']['f1-score'])
F.append(f1 )<prepare_x_and_y>
|
test_df = []
for i in range(0, test.shape[0], 300):
batch_idx = list(range(i, min(test.shape[0], i + 300)))
batch_set = test[batch_idx[0]: batch_idx[-1]+1]
test_generator = DataGenerator(
directory=test_images_dest_path,
dataframe=batch_set,
target_df=submission,
batch_size=1,
target_size=(HEIGHT, WIDTH),
n_channels=CHANNELS,
n_classes=N_CLASSES,
preprocessing=preprocessing,
seed=seed,
mode='predict',
shuffle=False)
preds = model.predict_generator(test_generator)
for index, b in enumerate(batch_idx):
filename = test['image'].iloc[b]
image_df = submission[submission['image'] == filename].copy()
pred_masks = preds[index, ].round().astype(int)
pred_rles = build_rles(pred_masks, reshape=(350, 525))
image_df['EncodedPixels'] = pred_rles
pred_masks_post = preds[index, ].astype('float32')
for class_index in range(N_CLASSES):
pred_mask = pred_masks_post[...,class_index]
pred_mask = post_process(pred_mask, threshold=best_tresholds[class_index], min_size=best_masks[class_index])
pred_masks_post[...,class_index] = pred_mask
pred_rles_post = build_rles(pred_masks_post, reshape=(350, 525))
image_df['EncodedPixels_post'] = pred_rles_post
test_df.append(image_df)
sub_df = pd.concat(test_df )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
X_train,X_test,Y_train,Y_test = build_features(train,test,use_extra=True,use_nmf=True )<save_to_csv>
|
submission_df = sub_df[['Image_Label' ,'EncodedPixels']]
submission_df.to_csv('submission.csv', index=False)
display(submission_df.head() )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
rreg = RidgeClassifier(alpha=.3,solver='sag')
rreg.fit(X_train, Y_train)
sub = rreg.predict(X_test)
pd.DataFrame(np.array([test['id'].values, sub]).T, columns=['id', 'target']).to_csv('submission.csv', sep=',', header=True, index=False)
|
submission_df_post = sub_df[['Image_Label' ,'EncodedPixels_post']]
submission_df_post.columns = ['Image_Label' ,'EncodedPixels']
submission_df_post.to_csv('submission_post.csv', index=False)
display(submission_df_post.head() )
|
Understanding Clouds from Satellite Images
|
6,654,012 |
<import_modules><EOS>
|
if os.path.exists(train_images_dest_path):
shutil.rmtree(train_images_dest_path)
if os.path.exists(validation_images_dest_path):
shutil.rmtree(validation_images_dest_path)
if os.path.exists(test_images_dest_path):
shutil.rmtree(test_images_dest_path )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
<SOS> metric: Dice Kaggle data source: understanding-clouds-from-satellite-images<import_modules>
|
seed(10)
set_random_seed(10)
%matplotlib inline
|
Understanding Clouds from Satellite Images
|
6,830,010 |
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV<load_from_csv>
|
test_imgs_folder = '../input/understanding_cloud_organization/test_images/'
train_imgs_folder = '../input/understanding_cloud_organization/train_images/'
num_cores = multiprocessing.cpu_count()
|
Understanding Clouds from Satellite Images
|
6,830,010 |
train = pd.read_csv(".. /input/titanic/train.csv")
test = pd.read_csv(".. /input/titanic/test.csv" )<count_missing_values>
|
train_df = pd.read_csv('../input/understanding_cloud_organization/train.csv')
train_df.head()
|
Understanding Clouds from Satellite Images
|
6,830,010 |
train.isnull().sum()<count_missing_values>
|
train_df = train_df[~train_df['EncodedPixels'].isnull() ]
train_df['Image'] = train_df['Image_Label'].map(lambda x: x.split('_')[0])
train_df['Class'] = train_df['Image_Label'].map(lambda x: x.split('_')[1])
classes = train_df['Class'].unique()
train_df = train_df.groupby('Image')['Class'].agg(set ).reset_index()
for class_name in classes:
train_df[class_name] = train_df['Class'].map(lambda x: 1 if class_name in x else 0)
train_df.head()
|
Understanding Clouds from Satellite Images
|
6,830,010 |
train.isnull().sum()<feature_engineering>
|
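# lookup table: image filename -> binary indicator vector of the four cloud classes present in the image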
img_2_ohe_vector = {img:vec for img, vec in zip(train_df['Image'], train_df.iloc[:, 2:].values)}
|
Understanding Clouds from Satellite Images
|
6,830,010 |
train.loc[:, "Age"] = train.groupby(["Pclass", "Sex"] ).Age.apply(lambda x: x.fillna(x.median()))
test.loc[:, "Age"] = test.groupby(["Pclass", "Sex"] ).Age.apply(lambda x : x.fillna(x.median()))<drop_column>
|
train_imgs, val_imgs = train_test_split(train_df['Image'].values,
test_size=0.2,
stratify=train_df['Class'].map(lambda x: str(sorted(list(x)))) ,
random_state=10 )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
train.drop(["Cabin", "Name", "Ticket", "Fare"], axis = "columns", inplace = True)
test.drop(["Cabin", "Name", "Ticket", "Fare"], axis = "columns", inplace = True )<prepare_x_and_y>
|
class DataGenenerator(Sequence):
def __init__(self, images_list=None, folder_imgs=train_imgs_folder,
batch_size=32, shuffle=True, augmentation=None,
resized_height=224, resized_width=224, num_channels=3):
self.batch_size = batch_size
self.shuffle = shuffle
self.augmentation = augmentation
if images_list is None:
self.images_list = os.listdir(folder_imgs)
else:
self.images_list = deepcopy(images_list)
self.folder_imgs = folder_imgs
self.len = len(self.images_list)// self.batch_size
self.resized_height = resized_height
self.resized_width = resized_width
self.num_channels = num_channels
self.num_classes = 4
self.is_test = not 'train' in folder_imgs
if not shuffle and not self.is_test:
self.labels = [img_2_ohe_vector[img] for img in self.images_list[:self.len*self.batch_size]]
def __len__(self):
return self.len
def on_epoch_start(self):
if self.shuffle:
random.shuffle(self.images_list)
def __getitem__(self, idx):
current_batch = self.images_list[idx * self.batch_size:(idx + 1)* self.batch_size]
X = np.empty(( self.batch_size, self.resized_height, self.resized_width, self.num_channels))
y = np.empty(( self.batch_size, self.num_classes))
for i, image_name in enumerate(current_batch):
path = os.path.join(self.folder_imgs, image_name)
img = cv2.resize(cv2.imread(path),(self.resized_height, self.resized_width)).astype(np.float32)
if not self.augmentation is None:
augmented = self.augmentation(image=img)
img = augmented['image']
X[i, :, :, :] = img/255.0
if not self.is_test:
y[i, :] = img_2_ohe_vector[image_name]
return X, y
def get_labels(self):
if self.shuffle:
images_current = self.images_list[:self.len*self.batch_size]
labels = [img_2_ohe_vector[img] for img in images_current]
else:
labels = self.labels
return np.array(labels )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
X = train.drop(["PassengerId", "Survived"], axis = "columns")
y = train.Survived<split>
|
albumentations_train = Compose([
VerticalFlip() , HorizontalFlip() , Rotate(limit=30), GridDistortion()
], p=1 )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state = 1 )<define_variables>
|
data_generator_train = DataGenenerator(train_imgs, augmentation=albumentations_train)
data_generator_train_eval = DataGenenerator(train_imgs, shuffle=False)
data_generator_val = DataGenenerator(val_imgs, shuffle=False )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
numerical_cols = ["Age", "Parch", "SibSp"]
categorical_cols = ["Pclass", "Sex", "Embarked"]<categorify>
|
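# Keras callback that computes per-class and mean precision-recall AUC each epoch; on the validation stage it also handles early stopping, checkpointing and learning-rate reduction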
class PrAucCallback(Callback):
def __init__(self, data_generator, num_workers=num_cores,
early_stopping_patience=5,
plateau_patience=3, reduction_rate=0.5,
stage='train', checkpoints_path='checkpoints/'):
super(Callback, self ).__init__()
self.data_generator = data_generator
self.num_workers = num_workers
self.class_names = ['Fish', 'Flower', 'Sugar', 'Gravel']
self.history = [[] for _ in range(len(self.class_names)+ 1)]
self.early_stopping_patience = early_stopping_patience
self.plateau_patience = plateau_patience
self.reduction_rate = reduction_rate
self.stage = stage
self.best_pr_auc = -float('inf')
if not os.path.exists(checkpoints_path):
os.makedirs(checkpoints_path)
self.checkpoints_path = checkpoints_path
def compute_pr_auc(self, y_true, y_pred):
pr_auc_mean = 0
print(f"
{'
")
for class_i in range(len(self.class_names)) :
precision, recall, _ = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i])
pr_auc = auc(recall, precision)
pr_auc_mean += pr_auc/len(self.class_names)
print(f"PR AUC {self.class_names[class_i]}, {self.stage}: {pr_auc:.3f}
")
self.history[class_i].append(pr_auc)
print(f"
{'
PR AUC mean, {self.stage}: {pr_auc_mean:.3f}
{'
")
self.history[-1].append(pr_auc_mean)
return pr_auc_mean
def is_patience_lost(self, patience):
if len(self.history[-1])> patience:
best_performance = max(self.history[-1][-(patience + 1):-1])
return best_performance == self.history[-1][-(patience + 1)] and best_performance >= self.history[-1][-1]
def early_stopping_check(self, pr_auc_mean):
if self.is_patience_lost(self.early_stopping_patience):
self.model.stop_training = True
def model_checkpoint(self, pr_auc_mean, epoch):
if pr_auc_mean > self.best_pr_auc:
for checkpoint in glob.glob(os.path.join(self.checkpoints_path, 'classifier_densenet169_epoch_*')) :
os.remove(checkpoint)
self.best_pr_auc = pr_auc_mean
self.model.save(os.path.join(self.checkpoints_path, f'classifier_densenet169_epoch_{epoch}_val_pr_auc_{pr_auc_mean}.h5'))
print(f"
{'
Saved new checkpoint
{'
")
def reduce_lr_on_plateau(self):
if self.is_patience_lost(self.plateau_patience):
new_lr = float(keras.backend.get_value(self.model.optimizer.lr)) * self.reduction_rate
keras.backend.set_value(self.model.optimizer.lr, new_lr)
print(f"
{'
Reduced learning rate to {new_lr}.
{'
")
def on_epoch_end(self, epoch, logs={}):
y_pred = self.model.predict_generator(self.data_generator, workers=self.num_workers)
y_true = self.data_generator.get_labels()
pr_auc_mean = self.compute_pr_auc(y_true, y_pred)
if self.stage == 'val':
self.early_stopping_check(pr_auc_mean)
self.model_checkpoint(pr_auc_mean, epoch)
self.reduce_lr_on_plateau()
def get_pr_auc_history(self):
return self.history
|
Understanding Clouds from Satellite Images
|
6,830,010 |
numerical_transform = StandardScaler()
categorical_tranform = Pipeline(steps = [
("imputer", SimpleImputer(strategy = "most_frequent")) ,
("onehot", OneHotEncoder(handle_unknown = "ignore"))
])
preprocessor = ColumnTransformer(transformers=[
("num", numerical_transform, numerical_cols),
("cat", categorical_tranform, categorical_cols)
] )<train_on_grid>
|
train_metric_callback = PrAucCallback(data_generator_train_eval)
val_callback = PrAucCallback(data_generator_val, stage='val' )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
classifier = Pipeline(steps= [
("preprocessor", preprocessor),
("model", LogisticRegression(max_iter = 10000))
])
param_grid = {
'model__C': [0.001, 0.01, 0.1,1, 10, 100, 1000]
}
clf_LR = GridSearchCV(classifier, param_grid, cv = 5, scoring= "accuracy")
clf_LR.fit(X_train, y_train )<find_best_score>
|
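# DenseNet169 backbone (ImageNet weights, global average pooling) with a 4-unit sigmoid head for multi-label cloud classification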
def get_model() :
K.clear_session()
base_model = DenseNet169(weights='imagenet', include_top=False, pooling='avg', input_shape=(224, 224, 3))
x = base_model.output
y_pred = Dense(4, activation='sigmoid' )(x)
return Model(inputs=base_model.input, outputs=y_pred)
model = get_model()
|
Understanding Clouds from Satellite Images
|
6,830,010 |
print(clf_LR.best_params_)
print(clf_LR.best_score_ )<train_on_grid>
|
for base_layer in model.layers[:-1]:
base_layer.trainable = False
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy')
history_0 = model.fit_generator(generator=data_generator_train,
validation_data=data_generator_val,
epochs=1,
callbacks=[train_metric_callback, val_callback],
workers=num_cores,
verbose=1
)
|
Understanding Clouds from Satellite Images
|
6,830,010 |
classifier = Pipeline(steps= [
("preprocessor", preprocessor),
("model", SVC())
])
param_grid = {
'model__C': [0.01, 0.1,1, 10, 100],
"model__gamma": [0.001, 0.01, 0.1, 1]
}
clf_SVC = GridSearchCV(classifier, param_grid, cv = 5, scoring= "accuracy")
clf_SVC.fit(X_train, y_train )<find_best_params>
|
for base_layer in model.layers[:-1]:
base_layer.trainable = True
model.compile(optimizer=Adam(lr=1e-5), loss='binary_crossentropy')
history_1 = model.fit_generator(generator=data_generator_train,
validation_data=data_generator_val,
epochs=2,
callbacks=[train_metric_callback, val_callback],
workers=num_cores,
verbose=1,
initial_epoch=1
)
|
Understanding Clouds from Satellite Images
|
6,830,010 |
print(clf_SVC.best_params_)
print(clf_SVC.best_score_ )<compute_train_metric>
|
model = load_model('../input/clouds-classifier-files/classifier_densenet169_epoch_21_val_pr_auc_0.8365921057512743.h5')
|
Understanding Clouds from Satellite Images
|
6,830,010 |
models = [LogisticRegression(max_iter= 10000, C = 0.1), SVC(C = 10, gamma= 0.1), RandomForestClassifier(n_estimators= 50)]
scores = []
for model in models:
classifier = Pipeline(steps= [
("preprocessor", preprocessor),
("model", model)
])
cross = cross_val_score(classifier, X_train, y_train)
scores.append(np.average(cross))
scores<choose_model_class>
|
Image(".. /input/clouds-classifier-files/loss_hist_densenet169.png" )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
classifier_final = Pipeline(steps= [
("preprocessor", preprocessor),
("model", SVC(C = 10, gamma = 0.1))
] )<train_model>
|
Image(".. /input/clouds-classifier-files/pr_auc_hist_densenet169.png" )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
classifier_final.fit(X_train, y_train )<predict_on_test>
|
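# pick per-class decision thresholds from the validation PR curve: the highest threshold that keeps recall >= 0.95, plus the first threshold reaching precision >= 0.94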
class_names = ['Fish', 'Flower', 'Sugar', 'Gravel']
def get_threshold_for_recall(y_true, y_pred, class_i, recall_threshold=0.95, precision_threshold=0.94, plot=False):
precision, recall, thresholds = precision_recall_curve(y_true[:, class_i], y_pred[:, class_i])
i = len(thresholds)- 1
best_recall_threshold = None
while best_recall_threshold is None:
next_threshold = thresholds[i]
next_recall = recall[i]
if next_recall >= recall_threshold:
best_recall_threshold = next_threshold
i -= 1
best_precision_threshold = [thres for prec, thres in zip(precision, thresholds)if prec >= precision_threshold][0]
if plot:
plt.figure(figsize=(10, 7))
plt.step(recall, precision, color='r', alpha=0.3, where='post')
plt.fill_between(recall, precision, alpha=0.3, color='r')
plt.axhline(y=precision[i + 1])
recall_for_prec_thres = [rec for rec, thres in zip(recall, thresholds)
if thres == best_precision_threshold][0]
plt.axvline(x=recall_for_prec_thres, color='g')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.legend(['PR curve',
f'Precision {precision[i + 1]:.2f} corresponding to selected recall threshold',
f'Recall {recall_for_prec_thres:.2f} corresponding to selected precision threshold'])
plt.title(f'Precision-Recall curve for Class {class_names[class_i]}')
return best_recall_threshold, best_precision_threshold
y_pred = model.predict_generator(data_generator_val, workers=num_cores)
y_true = data_generator_val.get_labels()
recall_thresholds = dict()
precision_thresholds = dict()
for i, class_name in tqdm(enumerate(class_names)) :
recall_thresholds[class_name], precision_thresholds[class_name] = get_threshold_for_recall(y_true, y_pred, i, plot=True )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
pred = classifier_final.predict(X_valid)
accuracy_score(y_valid, pred )<define_variables>
|
data_generator_test = DataGenenerator(folder_imgs=test_imgs_folder, shuffle=False)
y_pred_test = model.predict_generator(data_generator_test, workers=num_cores )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
PassengerId = test.PassengerId<drop_column>
|
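# collect Image_Label pairs whose classifier probability falls below the recall threshold; their segmentation masks are blanked out later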
image_labels_empty = set()
for i,(img, predictions)in enumerate(zip(os.listdir(test_imgs_folder), y_pred_test)) :
for class_i, class_name in enumerate(class_names):
if predictions[class_i] < recall_thresholds[class_name]:
image_labels_empty.add(f'{img}_{class_name}' )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
test.drop("PassengerId", axis = "columns", inplace = True )<predict_on_test>
|
submission = pd.read_csv('../input/efficient-net-b4-unet-clouds/submission.csv')
submission.head()
|
Understanding Clouds from Satellite Images
|
6,830,010 |
predictions = classifier_final.predict(test )<save_to_csv>
|
predictions_nonempty = set(submission.loc[~submission['EncodedPixels'].isnull() , 'Image_Label'].values )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
output = pd.DataFrame({"PassengerId": PassengerId,
"Survived": predictions})
output.to_csv("submission.csv", index = False )<set_options>
|
print(f'{len(image_labels_empty.intersection(predictions_nonempty)) } masks would be removed' )
|
Understanding Clouds from Satellite Images
|
6,830,010 |
%matplotlib inline
sns.set(palette=sns.color_palette('Set2',9))
<install_modules>
|
submission.loc[submission['Image_Label'].isin(image_labels_empty), 'EncodedPixels'] = np.nan
submission.to_csv('submission_segmentation_and_classifier.csv', index=None )
|
Understanding Clouds from Satellite Images
|
6,636,264 |
<import_modules>
|
def mask2rle(img):
pixels= img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x)for x in runs)
def rle2mask(mask_rle, shape=(2100, 1400)) :
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
if mask_rle != None and type(mask_rle)is str:
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int)for x in(s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape ).T
def normalize(images):
return images/128-1
def denormalize(images):
return(( images+1)*128 ).astype('uint8')
def load_image(Image):
path = TEST_PATH + Image
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if(img.shape !=(height, width, 3)) :
img = cv2.resize(img,(width, height))
return img
def resizeMask(mask, w, h):
resmask = np.zeros(( h, w, mask.shape[2]))
for i in range(mask.shape[2]):
resmask[...,i] = cv2.resize(mask[...,i],(w,h))
return resmask
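mask2rle and rle2mask above are intended to be inverses for binary masks (encode column-major runs, decode and transpose back). A quick round-trip check, assuming the two helpers are in scope; the toy mask below is illustrative only:
import numpy as np

toy = np.zeros((1400, 2100), dtype=np.uint8)
toy[100:300, 50:400] = 1                   # a rectangular "cloud"
rle = mask2rle(toy)                        # run-length string, column-major
back = rle2mask(rle, shape=(2100, 1400))   # decodes to a (1400, 2100) array
assert back.shape == (1400, 2100)
assert np.array_equal(toy, back)           # lossless round trip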
|
Understanding Clouds from Satellite Images
|
6,636,264 |
print(sns.__version__ )<load_from_csv>
|
TEST_PATH = '.. /input/understanding_cloud_organization/test_images/'
test_df = pd.read_csv('.. /input/understanding_cloud_organization/sample_submission.csv')
test_df['Label'] = test_df['Image_Label'].str.split("_", n = 1, expand = True)[1]
test_df['Image'] = test_df['Image_Label'].str.split("_", n = 1, expand = True)[0]
types = ['Fish', 'Flower', 'Gravel', 'Sugar']
pixel_thresholds = [0.5, 0.5, 0.5, 0.5 ]
mask_sum_threshold = [10000, 10000, 10000, 9000]
mask_threshold=[1000, 1000, 1000, 1000]
def mask_reduce(mask):
reduced_mask = np.zeros(mask.shape,np.float32)
for idx in range(mask.shape[2]):
label_num, labeled_mask = cv2.connectedComponents(mask[:,:, idx].astype(np.uint8))
for label in range(1, label_num):
single_label_mask =(labeled_mask == label)
if single_label_mask.sum() > mask_threshold[idx]:
reduced_mask[single_label_mask, idx] = 1
return reduced_mask.astype('uint8')
def mask_filter(mask):
lim = np.sum(mask, axis=(0,1)) < mask_sum_threshold
for i in range(len(lim)) :
if lim[i]: mask[..., i] = 0
return mask
def cleanup(pred):
return(pred>pixel_thresholds ).astype('uint8')
test_df.head()
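mask_reduce above keeps only connected components larger than mask_threshold for each class, and mask_filter blanks a class entirely when its total area is below mask_sum_threshold. A small illustrative check of the component filtering, assuming the helpers and threshold lists above are in scope:
import numpy as np

toy = np.zeros((256, 384, 4), np.uint8)
toy[10:60, 10:60, 0] = 1       # 2500-pixel component, kept (> 1000)
toy[200:210, 200:210, 0] = 1   # 100-pixel component, removed (< 1000)
reduced = mask_reduce(toy)
print(int(toy[..., 0].sum()), int(reduced[..., 0].sum()))  # 2600 -> 2500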
|
Understanding Clouds from Satellite Images
|
6,636,264 |
train_path='/kaggle/input/titanic/train.csv'
test_path='/kaggle/input/titanic/test.csv'
titanic_train=pd.read_csv(train_path)
titanic_test=pd.read_csv(test_path )<categorify>
|
path = '.. /input/single-models/'
models = []
model1 = sm.FPN('resnet34', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model1.load_weights(path+'Deotte-NormalBCEJaccard-FPN-Resnet34-val_loss-256.h5')
models.append({"model": model1, 'weight': 1})
model2 = sm.Unet('efficientnetb0', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model2.load_weights(path+'Deotte-NormalJackardBCE-NoPseudo-K0-256-ThisisGood-0.6483.h5')
models.append({"model": model2, 'weight': 1})
model3 = sm.FPN('efficientnetb0', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model3.load_weights(path+'Deotte-NormalBCEJaccard-FPN-val_loss-256.h5')
models.append({"model": model3, 'weight': 1})
model4 = sm.Unet('efficientnetb0', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model4.load_weights(path+'Deotte-perImageBCEJackard_real-noPseudo--256-0.6410.h5')
models.append({"model": model4, 'weight': 1})
model5 = sm.FPN('efficientnetb0', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model5.load_weights(path+'PerImageBCEJaccard-FPN-pseudo-256.h5')
models.append({"model": model5, 'weight': 1})
model6 = sm.Unet('efficientnetb0', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model6.load_weights(path+'NormalBCEJackard-Unet-Effnet-pseudo-256.h5')
models.append({"model": model6, 'weight': 1})
model7 = sm.FPN('resnet34', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model7.load_weights(path+'NormalBCEJaccard-FPN-Resnet34-pseudo-256.h5')
models.append({"model": model7, 'weight': 1})
model8 = sm.Unet('efficientnetb0', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model8.load_weights(path+'NormalBCEJackard-Unet-Effnet-pseudo-K1-256.h5')
models.append({"model": model8, 'weight': 1})
model9 = sm.Unet('resnet34', encoder_weights=None, classes=4, input_shape=(None, None, 3), activation='sigmoid')
model9.load_weights(path+'NormalBCEJackard-Unet-Resnet34-pseudo-K1-256.h5')
models.append({"model": model9, 'weight': 1} )
|
Understanding Clouds from Satellite Images
|
6,636,264 |
titanic_train['Cabin_t'] = titanic_train['Cabin_t'].replace(['A', 'B', 'C','T'], 'ABCT')
titanic_train['Cabin_t'] = titanic_train['Cabin_t'].replace(['D', 'E'], 'DE')
titanic_train['Cabin_t'] = titanic_train['Cabin_t'].replace(['F', 'G'], 'FG' )<feature_engineering>
|
def getClassifier(name):
base = efn.EfficientNetB3(weights=None, include_top=False, input_shape=(None, None, 3), pooling='avg')
base.trainable=True
dropout_dense_layer = 0.3
classifier_model = Sequential()
classifier_model.add(base)
classifier_model.add(Dropout(dropout_dense_layer))
classifier_model.add(Dense(4, activation='sigmoid'))
classifier_model.compile(
loss='binary_crossentropy',
optimizer=Adam() ,
metrics=['accuracy']
)
classifier_model.summary()
classifier_model.load_weights(path+name)
return classifier_model
classifier_models = []
classifier_models.append(getClassifier('classifierB3-256.h5'))
classifier_models.append(getClassifier('classifierB3-blackout00-smooth0-256.h5'))
classifier_models.append(getClassifier('classifierB3-blackout04-256.h5'))
|
Understanding Clouds from Satellite Images
|
6,636,264 |
titanic_train['Name_m']=titanic_train['Name'].str.split(pat=', ',n=1,expand=True)[1].str.split(pat='.',n=1,expand=True)[0]
def Name_transform(x):
if x=='Mr':
return 'Mr'
elif x=='Mrs':
return 'Mrs'
elif x=='Miss':
return 'Miss'
else:
return 'etc'
titanic_train['Name_M']=titanic_train['Name_m'].apply(Name_transform )<categorify>
|
ids = test_df['Image'].unique()
test_df.EncodedPixels = ''
height = 256
width = int(height * 1.5)
class_thresholds = [0.5, 0.5, 0.5, 0.5]
for picIdx in range(len(ids)) :
filename = ids[picIdx]
img = load_image(filename)
if picIdx % 100 == 0: print(picIdx)
batch = np.zeros(( 4, height, width, 3))
batch[0] = img
batch[1] = img[ :, ::-1, :]
batch[2] = img[ ::-1, :, :]
batch[3] = img[ ::-1, ::-1, :]
batch = normalize(batch)
predTTA = np.zeros(( batch.shape[0], img.shape[0], img.shape[1], 4))
for j in range(len(models)) :
predTTA += models[j]['model'].predict(batch)
predTTA /= len(models)
pred =(predTTA[0, :, :, :]+predTTA[1, :, ::-1, :]+predTTA[2, ::-1, :, :]+predTTA[3, ::-1, ::-1, :])/4.0
if len(classifier_models)>0:
classpred = np.zeros(( batch.shape[0], 4))
for j in range(len(classifier_models)) :
classpred += classifier_models[j].predict(batch)
classpred /= len(classifier_models)
classpred = np.mean(classpred, axis=0)
if np.sum(classpred>class_thresholds)== 0:
classpred[np.argmax(classpred)]=1
pred = pred *(classpred>class_thresholds)
pred = cleanup(pred)
pred = mask_reduce(pred)
pred = mask_filter(pred)
pred = resizeMask(pred, 525, 350)
for myType in types:
name = filename+"_"+myType
line = test_df[test_df.Image_Label == name].index[0]
i=types.index(myType)
maskrle = mask2rle(pred[..., i])
test_df.loc[line, 'EncodedPixels'] = maskrle
sub = test_df[['Image_Label', 'EncodedPixels']]
sub.to_csv('submission.csv', index=False)
sub.head(30 )
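In the loop above, each flipped prediction is re-flipped with the same slicing before the four maps are averaged, so all of them are back in the original orientation. A tiny numpy check of that indexing (illustrative only):
import numpy as np

a = np.arange(12).reshape(3, 4)
assert np.array_equal(a[:, ::-1][:, ::-1], a)        # horizontal flip is its own inverse
assert np.array_equal(a[::-1, :][::-1, :], a)        # vertical flip is its own inverse
assert np.array_equal(a[::-1, ::-1][::-1, ::-1], a)  # combined flip is its own inverse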
|
Understanding Clouds from Satellite Images
|
6,636,264 |
titanic_train['Ticket_Freq']=titanic_train.groupby('Ticket')['Ticket'].transform('count' )<load_from_csv>
|
sub['Label'] = sub['Image_Label'].str.split("_", n = 1, expand = True)[1]
sub['Image'] = sub['Image_Label'].str.split("_", n = 1, expand = True)[0]
print(sub[(sub.Label == 'Fish')&(sub.EncodedPixels != '')]['Image'].count())
print(sub[(sub.Label == 'Sugar')&(sub.EncodedPixels != '')]['Image'].count())
print(sub[(sub.Label == 'Gravel')&(sub.EncodedPixels != '')]['Image'].count())
print(sub[(sub.Label == 'Flower')&(sub.EncodedPixels != '')]['Image'].count() )
|
Understanding Clouds from Satellite Images
|
6,338,895 |
titanic_train=pd.read_csv(train_path)
titanic_test=pd.read_csv(test_path)
target=titanic_train['Survived']
Id=titanic_test[['PassengerId']]<train_model>
|
path = '.. /input/understanding_cloud_organization'
|
Understanding Clouds from Satellite Images
|
6,338,895 |
class NullTransformer(BaseEstimator,TransformerMixin):
def fit(self,df,y=None):
return self
def transform(self,df):
df['Embarked'].fillna('S',inplace=True)
df['Cabin'].fillna('X',inplace=True)
missing_Fare_index=list(df['Fare'][df['Fare'].isnull() ].index)
for index in missing_Fare_index:
if df['Pclass'][index]==1:
df['Fare'].iloc[index]=df.groupby('Pclass')['Fare'].median() [1]
elif df['Pclass'][index]==2:
df['Fare'].iloc[index]=df.groupby('Pclass')['Fare'].median() [2]
elif df['Pclass'][index]==3:
df['Fare'].iloc[index]=df.groupby('Pclass')['Fare'].median() [3]
index_NaN_age = list(df["Age"][df["Age"].isnull() ].index)
for index in index_NaN_age :
if df['Pclass'][index]==1:
df['Age'].iloc[index]=df.groupby('Pclass')['Age'].median() [1]
elif df['Pclass'][index]==2:
df['Age'].iloc[index]=df.groupby('Pclass')['Age'].median() [2]
elif df['Pclass'][index]==3:
df['Age'].iloc[index]=df.groupby('Pclass')['Age'].median() [3]
return df<categorify>
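One design note on the fills above: assigning through df['Fare'].iloc[index] is chained indexing, which pandas warns about and which does not write back under copy-on-write; a .loc-based fill expresses the same idea more safely. A small self-contained sketch with hypothetical data:
import numpy as np
import pandas as pd

df = pd.DataFrame({'Pclass': [1, 1, 3, 3], 'Fare': [80.0, np.nan, 7.9, np.nan]})
fare_by_class = df.groupby('Pclass')['Fare'].median()
for index in df.index[df['Fare'].isnull()]:
    # .loc with (row label, column label) avoids the chained-assignment pattern
    df.loc[index, 'Fare'] = fare_by_class[df.loc[index, 'Pclass']]
print(df)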
|
Train_Dir = '/kaggle/input/understanding_cloud_organization/train_images'
Test_Dir = '/kaggle/input/understanding_cloud_organization/test_images'
for img in tqdm(os.listdir(Train_Dir)) :
imgr = cv2.imread(os.path.join(Train_Dir,img))
|
Understanding Clouds from Satellite Images
|
6,338,895 |
class FeatureExtraction(BaseEstimator,TransformerMixin):
def fit(self,df,y=None):
return self
def transform(self,df):
df['Family']=df['SibSp']+df['Parch']+1
def groupfamily(x):
if x==1:
return 1
elif(2<=x)&(x<=4):
return 2
elif(5<=x)&(x<=6):
return 3
else:
return 4
df['Family']=df['Family'].apply(groupfamily)
df.drop(['SibSp','Parch','PassengerId'],inplace=True,axis=1)
if 'Survived' in list(df.keys()):
df.drop(['Survived'],inplace=True,axis=1)
df['Ticket_freq']=df.groupby('Ticket')['Ticket'].transform('count')
df.drop(['Ticket'],inplace=True,axis=1)
df['Cabin']=df['Cabin'].str.get(i=0)
df['Cabin'] = df['Cabin'].replace(['A', 'B', 'C','T'], 'ABCT')
df['Cabin'] = df['Cabin'].replace(['D', 'E'], 'DE')
df['Cabin'] = df['Cabin'].replace(['F', 'G'], 'FG')
df['Name']=df['Name'].str.split(pat=', ',n=1,expand=True)[1].str.split(pat='.',n=1,expand=True)[0]
def Name_transform(x):
if x=='Mr':
return 'Mr'
elif x=='Mrs':
return 'Mrs'
elif x=='Miss':
return 'Miss'
else:
return 'etc'
df['Name']=df['Name'].apply(Name_transform)
AGE_BINS=[-1,5,17,20,24,28,32,40,48,100]
df['Age_cat']=pd.cut(df['Age'],AGE_BINS,labels=[0.,1.,2.,3.,4.,5.,6.,7.,8.])
FARE_BINS=[-1,7.35, 7.82, 8, 10, 13, 23,30, 45, 80, 150, 1000]
df['Fare_cat']=pd.cut(df['Fare'],bins=FARE_BINS,labels=[0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.])
df.drop(['Age','Fare'],inplace=True,axis=1)
return df<categorify>
|
train_csv_folder = '/kaggle/input/understanding_cloud_organization/train.csv'
train_csv = pd.read_csv(train_csv_folder)
|
Understanding Clouds from Satellite Images
|
6,338,895 |
attribs=['Pclass','Name','Sex','Age','Ticket','Fare','Cabin','Embarked','SibSp','Parch']
num_attribs=['Pclass','Age_cat','Fare_cat',"Ticket_freq"]
cat_attribs=['Name','Sex','Cabin','Embarked','Family']
pipeline1=Pipeline([
('NT',NullTransformer()),
('FE',FeatureExtraction())
])
train=pipeline1.fit_transform(titanic_train)
test=pipeline1.transform(titanic_test)
train=pd.get_dummies(train,columns=cat_attribs)
test=pd.get_dummies(test,columns=cat_attribs)
<data_type_conversions>
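Calling pd.get_dummies separately on train and test can produce mismatched columns when a category is present in only one split; aligning the two frames keeps the feature matrices consistent. An illustrative sketch (hypothetical data, not the kernel's code):
import pandas as pd

train_x = pd.get_dummies(pd.DataFrame({'Cabin': ['ABCT', 'DE']}), columns=['Cabin'])
test_x = pd.get_dummies(pd.DataFrame({'Cabin': ['DE', 'FG']}), columns=['Cabin'])
# outer-align on columns and fill categories missing from either split with 0
train_x, test_x = train_x.align(test_x, join='outer', axis=1, fill_value=0)
print(list(train_x.columns) == list(test_x.columns))  # True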
|
train_csv['ImageId'] = train_csv['Image_Label'].apply(lambda x: x.split('_')[0])
train_csv['ClassId'] = train_csv['Image_Label'].apply(lambda x: x.split('_')[1])
train_csv['hasmask'] = ~train_csv['EncodedPixels'].isna()
|
Understanding Clouds from Satellite Images
|
6,338,895 |
train=train.to_numpy()
test=test.to_numpy()<choose_model_class>
|
mask_count_df = train_csv.groupby('ImageId' ).agg(np.sum ).reset_index()
mask_count_df.sort_values('hasmask', ascending=False, inplace=True)
print(mask_count_df.shape)
mask_count_df.head(10 )
|
Understanding Clouds from Satellite Images
|