kernel_id (int64, 24.2k–23.3M) | prompt (stringlengths 8–1.85M) | completion (stringlengths 1–182k) | comp_name (stringlengths 5–57) |
---|---|---|---|
11,485,474 |
def shop_name2city(sn):
    sn = sn.split()[0]
if sn == 'Цифровой' or sn == 'Интернет-магазин': sn = 'Internet'
if sn[0] == '!': sn = sn[1:]
return sn
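# Illustrative (added) examples of the mapping above, using made-up shop names:
#   shop_name2city('!Якутск ТЦ')          -> 'Якутск'   (leading '!' stripped)
#   shop_name2city('Интернет-магазин ЧС') -> 'Internet'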
df_shops['city'] = df_shops['shop_name'].apply(shop_name2city)
df_shops['city_enc'] = LabelEncoder().fit_transform(df_shops['city']).astype('int8')
city_info = pd.read_pickle(adpath + 'city_info.pkl')
df_shops['city_size'] = df_shops['city'].map(city_info['city_size'])<categorify>
|
K.clear_session()
model = Sequential()
model.add(Conv2D(32,(3, 3), activation='tanh', input_shape=(28, 28, 1), padding="SAME"))
model.add(Conv2D(32,(3, 3), activation='tanh', padding="SAME"))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(64,(3, 3), activation='tanh', padding="SAME"))
model.add(Conv2D(64,(3, 3), activation='tanh', padding="SAME"))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(128,(3, 3), activation='tanh', padding="SAME"))
model.add(Conv2D(128,(3, 3), activation='tanh', padding="SAME"))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(784, activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(128, activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='Adamax',
metrics=['accuracy'] )
|
Digit Recognizer
|
11,485,474 |
class Items:
def __init__(self, df_items, df_itemcat):
self.df_items = df_items
self.df_itemcat = df_itemcat
self.set_hl_cat()
self.make_items_ext()
self.item_features = ['item_category_id', 'hl_cat_id']
def set_hl_cat(self):
self.df_itemcat['hl_cat_id'] = self.df_itemcat['item_category_name'].str.split(n=1, expand=True)[0]
self.df_itemcat['hl_cat_id'] = LabelEncoder().fit_transform(self.df_itemcat['hl_cat_id'])
def make_items_ext(self):
        self.df_items = self.df_items.merge(self.df_itemcat, how = 'left',
                                            left_on = 'item_category_id', right_index = True)
def get_items_df(self):
        return self.df_items[self.item_features].astype('int32')<concatenate>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )
|
Digit Recognizer
|
11,485,474 |
items = Items(df_items, df_itemcat)<prepare_output>
|
K.clear_session()
hh = model.fit(generator.flow(X_train, y_train_cat, batch_size=64), validation_data=(X_test, y_test_cat),
               steps_per_epoch=len(X_train) // 64, epochs=30, verbose=1, callbacks=[learning_rate_reduction])
|
Digit Recognizer
|
11,485,474 |
class TT_Extended:
def __init__(self, df_train, df_test, items, df_shops, calendar, cmode, verbose=True):
self.info = verbose
self.df_train = df_train.copy()
self.df_test = df_test.copy()
self.df_shops = df_shops.copy()
        self.calendar = self.set_calendar(calendar.copy())
self.idx_columns = ['date_block_num', 'shop_id', 'item_id']
self.df_test['date_block_num'] = 34
self.df_test['item_cnt_month'] = 0.
self.df_test['item_cnt_month'] = self.df_test['item_cnt_month'].astype('float32')
self.df_train[self.idx_columns] = self.df_train[self.idx_columns].astype('int32')
self.df_test[self.idx_columns] = self.df_test[self.idx_columns].astype('int32')
self.df_train_cleaning(cmode)
self.item_mean_features = []
self.shop_mean_features = []
self.lag_names_to_clip = []
self.df_items = items.get_items_df()
self.item_ext_features = list(self.df_items.columns)
self.df_items_ext = self.items_ext()
self.df_bb = self.build_bb()
self.df_train_ext = self.df_train_agg(cmin = 0, cmax = 1000, drop = None)
self.add_test_df()
        self.df_train_ext = self.df_train_extension()
def df_train_cleaning(self, mode):
assert mode in ['keep', 'drop', 'block', 'total', 't+b']
        if self.info: print('Cleaning train dataframe... (mode:', mode, ')')
shop_idx = self.df_train[(self.df_train['shop_id'] == 9)|
(self.df_train['shop_id'] == 20)].index
self.df_train.drop(shop_idx, inplace=True)
self.df_train = self.df_train[(self.df_train['item_price'] > 0)&
(self.df_train['item_price'] < 51000)]
self.df_train = self.df_train[self.df_train['item_cnt_day'] <= 1000]
shop_repl_dict = {0 : 57, 1 : 58, 11 : 10, 40 : 39}
        self.df_train['shop_id'] = self.df_train['shop_id'].apply(
            lambda s: shop_repl_dict.get(s, s))
if mode == 'drop':
self.df_train = self.df_train[self.df_train['item_cnt_day'] > 0]
elif mode == 'block':
item_block_cnt = self.df_train.groupby(['date_block_num', 'shop_id', 'item_id'])['item_cnt_day'].sum()
items_to_drop = item_block_cnt[item_block_cnt <= 0].index
self.df_train = self.df_train[~self.df_train.set_index(
['date_block_num', 'shop_id', 'item_id'] ).index.isin(items_to_drop)]
elif mode == 'total':
item_total_cnt = self.df_train.groupby(['shop_id', 'item_id'])['item_cnt_day'].sum()
items_to_drop = item_total_cnt[item_total_cnt <= 0].index
self.df_train = self.df_train[~self.df_train.set_index(
['shop_id', 'item_id'] ).index.isin(items_to_drop)]
elif mode == 't+b':
item_total_cnt = self.df_train.groupby(['shop_id', 'item_id'])['item_cnt_day'].sum()
items_to_drop = item_total_cnt[item_total_cnt <= 0].index
self.df_train = self.df_train[~self.df_train.set_index(
['shop_id', 'item_id'] ).index.isin(items_to_drop)]
item_block_cnt = self.df_train.groupby(['date_block_num', 'shop_id', 'item_id'])['item_cnt_day'].sum()
items_to_drop = item_block_cnt[item_block_cnt <= 0].index
self.df_train = self.df_train[~self.df_train.set_index(
['date_block_num', 'shop_id', 'item_id'] ).index.isin(items_to_drop)]
return
    def set_calendar(self, calendar):
calendar['date_block_num'] =(calendar['year'] - 2013)*12 +(calendar['month'] - 1)
calendar['hdays'] = calendar['mdays'] - calendar['wdays']
calendar.set_index('date_block_num', inplace=True)
return calendar
def items_ext(self):
dfi = self.df_items.copy()
dfi['fsb'] = self.df_train.groupby('item_id')['date_block_num'].min()
dfi['fsb'].fillna(34, inplace=True)
dfi['fsb'] = dfi['fsb'].astype('int8')
self.item_ext_features += ['fsb']
return dfi
def build_bb(self):
if self.info: print('Building index dataframe...')
df_work = []
        for block_num in self.df_train['date_block_num'].unique():
            cur_shops = self.df_train.loc[self.df_train['date_block_num'] == block_num, 'shop_id'].unique()
            cur_items = self.df_train.loc[self.df_train['date_block_num'] == block_num, 'item_id'].unique()
            df_work.append(np.array(list(product([block_num], cur_shops, cur_items)), dtype='int32'))
df_work = pd.DataFrame(np.vstack(df_work), columns = self.idx_columns)
return df_work
def df_train_agg(self, cmin = 0, cmax = 20, drop = None):
if self.info: print('Aggregation...')
        df_work = self.df_train.groupby(self.idx_columns).agg({'item_price': 'mean',
                                                               'item_cnt_day': 'sum'})
df_work.reset_index(inplace=True)
df_work = df_work.rename(columns={'item_cnt_day': 'item_cnt_month'})
df_work = pd.merge(self.df_bb, df_work, on=self.idx_columns, how='left')
df_work['item_cnt_month'] = df_work['item_cnt_month'].astype('float32' ).fillna(0.).clip(cmin, cmax)
df_work['item_price'] = df_work['item_price'].astype('float32' ).fillna(0.)
df_tmp = self.df_train[self.df_train['item_cnt_day'] > 0].groupby(self.idx_columns ).agg({'item_cnt_day': 'count'})
df_tmp.reset_index(inplace=True)
df_tmp = df_tmp.rename(columns={'item_cnt_day': 'item_rate_month'})
df_work = pd.merge(df_work, df_tmp, on=self.idx_columns, how='left')
df_work['item_rate_month'] = df_work['item_rate_month'].astype('float32' ).fillna(0.)
del df_tmp
if drop: df_work.drop(drop, axis=1, inplace=True)
return df_work
def add_test_df(self):
self.df_train_ext = pd.concat([self.df_train_ext, self.df_test], ignore_index=True,
sort=False, keys=self.idx_columns)
def add_item_means(self, df, feature = None):
        if feature is None:
group_items = ['date_block_num','item_id']
feature = 'item_cnt'
else:
group_items = ['date_block_num','item_id'] + [feature]
feature_mean_name = feature + '_mean'
if self.info: print('Adding item means for', feature, '...')
df_tmp = df.groupby(group_items)['item_cnt_month'].mean()
df_tmp = df_tmp.reset_index().rename(columns = {'item_cnt_month': feature_mean_name})
df = pd.merge(df, df_tmp, on=group_items, how='left')
self.item_mean_features.append(feature_mean_name)
del df_tmp
return df
def add_shop_means(self, df, feature):
group_items = ['date_block_num', 'shop_id'] + [feature]
feature_mean_name = feature + '_mean'
if self.info: print('Adding shop means for', feature, '...')
df_tmp = df.groupby(group_items)['item_cnt_month'].mean()
df_tmp = df_tmp.reset_index().rename(columns = {'item_cnt_month': feature_mean_name})
df = pd.merge(df, df_tmp, on=group_items, how='left')
self.shop_mean_features.append(feature)
del df_tmp
return df
    def df_train_extension(self, test_cat_only = False):
        df_work = self.df_train_ext.merge(self.df_shops[['city_enc', 'city_size']], how = 'left', on = 'shop_id')
df_work = df_work.merge(self.calendar[['mdays', 'wdays', 'hdays']],
how = 'left', left_on = 'date_block_num', right_index = True)
df_work = df_work.merge(self.df_items_ext[self.item_ext_features],
how = 'left', left_on = 'item_id', right_index = True)
ssbn = self.df_train.groupby('shop_id')['date_block_num'].min().astype('int8')
ssbn.name = 'ssbn'
df_work = df_work.merge(ssbn, how = 'left', on = 'shop_id')
if test_cat_only:
test_cat = df_work[df_work['date_block_num'] == 34]['item_category_id'].unique()
df_work = df_work[df_work['item_category_id'].isin(test_cat)]
return df_work
def make_base_df(self, keepnans = False):
df_work = self.df_train_ext.copy()
if keepnans:
fill_value = None
else:
fill_value = 0
        df_work = pd.pivot_table(df_work, values='item_cnt_month', index=['shop_id', 'item_id'],
                                 columns='date_block_num', aggfunc='sum', fill_value=fill_value)
df_work.columns.name = ''
return df_work
def add_total_cnt(self, df):
if self.info: print('Adding total count...')
df_base = self.make_base_df()
for i in range(1, 33):
df_base[i + 1] += df_base[i]
df_base = df_base.shift(1, axis=1 ).loc[:, 1:].astype('int32')
df_base = df_base.melt(var_name='date_block_num', value_name='total_cnt', ignore_index=False)
df = df.merge(df_base, how='left', on=self.idx_columns)
del df_base
return df
def add_item_lags(self, df, feature_name, nlags=3, keepnans=False, dnc=False):
if self.info: print('Adding item lags for', feature_name, '...')
df_tmp = df[['date_block_num', 'shop_id', 'item_id', feature_name]]
for i in range(nlags, 0, -1):
lag_feature_name = feature_name +'_lag-' + str(i)
if not dnc: self.lag_names_to_clip.append(lag_feature_name)
df_shifted = df_tmp.copy()
df_shifted.columns = ['date_block_num', 'shop_id', 'item_id', lag_feature_name]
df_shifted['date_block_num'] += i
df = pd.merge(df, df_shifted, on=['date_block_num', 'shop_id', 'item_id'], how='left')
if keepnans:
df[lag_feature_name] = df[lag_feature_name].astype('float32')
else:
df[lag_feature_name] = df[lag_feature_name].fillna(0 ).astype('float32')
del df_tmp
return df
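    # Worked example (added): with nlags=3, a row at date_block_num=10 gains
    # item_cnt_month_lag-1/-2/-3 holding the counts from blocks 9, 8 and 7;
    # unmatched rows become 0 unless keepnans=True.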
def add_shop_lags(self, df, feature_name, nlags=3, dnc=False):
mean_feature_name = feature_name + '_mean'
if self.info: print('Adding lags for', mean_feature_name, '...')
df_tmp = df[['date_block_num', 'shop_id', feature_name, mean_feature_name]]
for i in range(nlags, 0, -1):
lag_feature_name = mean_feature_name + '_lag-' + str(i)
if not dnc: self.lag_names_to_clip.append(lag_feature_name)
df_shifted = df_tmp.copy()
df_shifted.columns = ['date_block_num', 'shop_id', feature_name, lag_feature_name]
df_shifted['date_block_num'] += i
df = pd.merge(df, df_shifted.drop_duplicates() , on=['date_block_num', 'shop_id', feature_name], how='left')
df[lag_feature_name] = df[lag_feature_name].fillna(0 ).astype('float32')
del df_tmp
del df_shifted
return df
def shop_clustering(self, df_work):
print('No shop clusters provided')
return df_work
def build_work_db(self, hd, item_mean_features = [], shop_mean_features = [], add_total_cnt = False):
if self.info: print('Building work dataframe...')
df_work = self.df_train_ext.copy()
df_work = self.add_item_means(df_work)
for mf in item_mean_features:
df_work = self.add_item_means(df_work, mf)
for mf in shop_mean_features:
df_work = self.add_shop_means(df_work, mf)
df_work = self.add_item_lags(df_work, 'item_cnt_month', hd, keepnans=False)
df_work = self.add_item_lags(df_work, 'item_rate_month', hd, dnc=True)
df_work = self.add_item_lags(df_work, 'item_price', hd, dnc=True)
for mf in self.item_mean_features:
df_work = self.add_item_lags(df_work, mf, hd)
for mf in self.shop_mean_features:
df_work = self.add_shop_lags(df_work, mf, hd)
df_work.drop(df_work[df_work['date_block_num'] < hd].index, inplace=True)
df_work.drop(self.item_mean_features, axis=1, inplace=True)
df_work.drop(['item_category_id_mean', 'item_price', 'item_rate_month'], axis=1, inplace=True)
self.item_mean_features = []
self.shop_mean_features = []
df_work['qmean'] = df_work[['item_cnt_month_lag-1',
'item_cnt_month_lag-2',
'item_cnt_month_lag-3']].mean(skipna=True, axis=1)
df_work['new'] = df_work['fsb'] == df_work['date_block_num']
df_work['fsb'] = df_work['date_block_num'] - df_work['fsb']
df_work['ssbn'] = df_work['date_block_num'] - df_work['ssbn']
df_work['month'] =(df_work['date_block_num']%12 ).astype('int8')
idx = df_work[(df_work['item_price_lag-1'] == 0)&(df_work['item_price_lag-2'] != 0)].index
df_work.loc[idx, 'item_price_lag-1'] = df_work.loc[idx, 'item_price_lag-2']
idx = df_work[(df_work['item_price_lag-2'] == 0)&(df_work['item_price_lag-3'] != 0)].index
df_work.loc[idx, 'item_price_lag-2'] = df_work.loc[idx, 'item_price_lag-3']
df_work['grad-1'] = df_work['item_cnt_month_lag-1']/df_work['item_cnt_month_lag-2']
df_work['grad-1'] = df_work['grad-1'].replace([np.inf, -np.inf], np.nan ).fillna(0.)
df_work['grad-2'] = df_work['item_cnt_month_lag-2']/df_work['item_cnt_month_lag-3']
df_work['grad-2'] = df_work['grad-2'].replace([np.inf, -np.inf], np.nan ).fillna(0.)
df_work = self.shop_clustering(df_work)
if add_total_cnt: df_work = self.add_total_cnt(df_work)
col2clip = ['item_cnt_month', 'qmean'] + self.lag_names_to_clip
df_work[col2clip] = df_work[col2clip].clip(0, 20)
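        # (added) clipping to [0, 20] mirrors the competition metric, which
        # evaluates against targets clipped into that range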
return df_work
def get_work_db(self, hd = 3, item_mean_features = [],
shop_mean_features = [],
drop_features = None,
add_total_cnt = False):
df_work = self.build_work_db(hd, item_mean_features, shop_mean_features, add_total_cnt)
        if drop_features is None:
            return df_work
        else:
            return df_work.drop(drop_features, axis = 1)<concatenate>
|
trained_weights = model.get_weights()
|
Digit Recognizer
|
11,485,474 |
%%time
pfs = TT_Extended(df_train, df_test, items, df_shops, calendar, cmode='total')<set_options>
|
learning_rate_reduction_2 = ReduceLROnPlateau(monitor='val_accuracy',
patience=3,
verbose=1,
factor=0.3,
min_lr=0.00001 )
|
Digit Recognizer
|
11,485,474 |
pfs.shop_clustering = simple_shop_clustering<drop_column>
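# Note (added): the assignment above stores a plain function on the instance, so
# TT_Extended.build_work_db will call it as shop_clustering(df_work), without self.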
|
K.clear_session()
model.set_weights(trained_weights)
h = model.fit(X_train, y_train_cat,
batch_size = 64,
validation_data=(X_test, y_test_cat),
epochs=50,
verbose=1, callbacks=[learning_rate_reduction_2] )
|
Digit Recognizer
|
11,485,474 |
%%time
df_work = pfs.get_work_db(hd = 3,
item_mean_features = ['city_enc'],
shop_mean_features = ['item_category_id'],
drop_features = ['wdays', 'hdays', 'ssbn'],
                          add_total_cnt = False).copy()<feature_engineering>
|
model.evaluate(X_test, y_test_cat )
|
Digit Recognizer
|
11,485,474 |
df_work['city_size'] = df_work['city_size'].round(1)<prepare_x_and_y>
|
predictions = model.predict(test_data )
|
Digit Recognizer
|
11,485,474 |
X_train = df_work[df_work.date_block_num < 33].drop(['item_cnt_month'], axis=1)
y_train = df_work[df_work.date_block_num < 33]['item_cnt_month']
X_valid = df_work[df_work.date_block_num == 33].drop(['item_cnt_month'], axis=1)
y_valid = df_work[df_work.date_block_num == 33]['item_cnt_month']
X_test = df_work[df_work.date_block_num == 34].drop(['item_cnt_month'], axis=1)<drop_column>
|
pred = np.argmax(predictions, axis = 1)
pred.shape
|
Digit Recognizer
|
11,485,474 |
del df_work<init_hyperparams>
|
sample = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv' )
|
Digit Recognizer
|
11,485,474 |
<save_to_csv><EOS>
|
output = pd.DataFrame({'ImageId': sample.ImageId, 'Label': pred})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" )
|
Digit Recognizer
|
11,530,975 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
|
mnist_test = pd.read_csv("../input/mnist-in-csv/mnist_test.csv")
mnist_train = pd.read_csv("../input/mnist-in-csv/mnist_train.csv")
|
Digit Recognizer
|
11,530,975 |
warnings.filterwarnings(action='ignore')
data_path = '/kaggle/input/competitive-data-science-predict-future-sales/'
sales_train = pd.read_csv(data_path + 'sales_train.csv')
shops = pd.read_csv(data_path + 'shops.csv')
items = pd.read_csv(data_path + 'items.csv')
item_categories = pd.read_csv(data_path + 'item_categories.csv')
test = pd.read_csv(data_path + 'test.csv')
submission = pd.read_csv(data_path + 'sample_submission.csv' )<data_type_conversions>
|
sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv")
|
Digit Recognizer
|
11,530,975 |
def downcast(df, verbose=True):
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
dtype_name = df[col].dtype.name
if dtype_name == 'object':
pass
elif dtype_name == 'bool':
df[col] = df[col].astype('int8')
        elif dtype_name.startswith('int') or (df[col].round() == df[col]).all():
df[col] = pd.to_numeric(df[col], downcast='integer')
else:
df[col] = pd.to_numeric(df[col], downcast='float')
end_mem = df.memory_usage().sum() / 1024**2
if verbose:
        print('{:.1f}% compressed'.format(100 * (start_mem - end_mem) / start_mem))
return df
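# Note (added): downcast() replaces columns in place, so the loop below shrinks the
# original dataframes even though rebinding `df = downcast(df)` never updates the list.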
all_df = [sales_train, shops, items, item_categories, test]
for df in all_df:
    df = downcast(df)<filter>
|
test['dataset'] = 'test'
|
Digit Recognizer
|
11,530,975 |
sales_train = sales_train[sales_train['item_price'] > 0]
sales_train = sales_train[sales_train['item_price'] < 50000]
sales_train = sales_train[sales_train['item_cnt_day'] > 0]
sales_train = sales_train[sales_train['item_cnt_day'] < 1000]<feature_engineering>
|
train['dataset'] = 'train'
|
Digit Recognizer
|
11,530,975 |
sales_train.loc[sales_train['shop_id'] == 0, 'shop_id'] = 57
sales_train.loc[sales_train['shop_id'] == 1, 'shop_id'] = 58
sales_train.loc[sales_train['shop_id'] == 10, 'shop_id'] = 11
sales_train.loc[sales_train['shop_id'] == 39, 'shop_id'] = 40
test.loc[test['shop_id'] == 0, 'shop_id'] = 57
test.loc[test['shop_id'] == 1, 'shop_id'] = 58
test.loc[test['shop_id'] == 10, 'shop_id'] = 11
test.loc[test['shop_id'] == 39, 'shop_id'] = 40<feature_engineering>
|
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
|
Digit Recognizer
|
11,530,975 |
shops['city'] = shops['shop_name'].apply(lambda x: x.split()[0])<feature_engineering>
|
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True)
labels = mnist['label'].values
mnist.drop('label', axis=1, inplace=True)
mnist.columns = cols
|
Digit Recognizer
|
11,530,975 |
shops.loc[shops['city'] == '!Якутск', 'city'] = 'Якутск'<categorify>
|
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index
dataset_from = dataset.sort_values(by=list(mnist.columns))['dataset'].values
original_idx = dataset.sort_values(by=list(mnist.columns))['index'].values
|
Digit Recognizer
|
11,530,975 |
label_encoder = LabelEncoder()
shops['city'] = label_encoder.fit_transform(shops['city'])<drop_column>
|
for i in range(len(idx_mnist)):
if dataset_from[i] == 'test':
sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
|
Digit Recognizer
|
11,530,975 |
shops = shops.drop('shop_name', axis=1)
shops.head()<drop_column>
|
sample_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,484,816 |
items = items.drop(['item_name'], axis=1 )<groupby>
|
sns.set()
|
Digit Recognizer
|
11,484,816 |
items['first_sale_date'] = sales_train.groupby('item_id' ).agg({'date_block_num': 'min'})['date_block_num']
items.head()<count_missing_values>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
train
|
Digit Recognizer
|
11,484,816 |
items[items['first_sale_date'].isna() ]<data_type_conversions>
|
X_train = train.drop(['label'],axis = 1)
y_train = train['label']
|
Digit Recognizer
|
11,484,816 |
items['first_sale_date'] = items['first_sale_date'].fillna(34 )<feature_engineering>
|
X_test = test
|
Digit Recognizer
|
11,484,816 |
item_categories['category'] = item_categories['item_category_name'].apply(lambda x: x.split()[0])<count_values>
|
print('Number of null values in training set is : ',train.isnull().sum().unique())
print('Number of null values in test set is : ',test.isnull().sum().unique() )
|
Digit Recognizer
|
11,484,816 |
item_categories['category'].value_counts()<feature_engineering>
|
X_train_reshaped = X_train.values.reshape(-1,28,28,1)
X_test_reshaped = X_test.values.reshape(-1,28,28,1)
X_train_normalised = X_train_reshaped/255.
X_test_normalised = X_test_reshaped/255.
|
Digit Recognizer
|
11,484,816 |
def make_etc(x):
    if len(item_categories[item_categories['category'] == x]) >= 5:
        return x
    else:
        return 'etc'
item_categories['category'] = item_categories['category'].apply(make_etc)<categorify>
|
y_train_encoded = to_categorical(y_train,num_classes=10 )
|
Digit Recognizer
|
11,484,816 |
label_encoder = LabelEncoder()
item_categories['category'] = label_encoder.fit_transform(item_categories['category'])
item_categories = item_categories.drop('item_category_name', axis=1 )<merge>
|
X_train_final,X_val,y_train_final,y_val = train_test_split(X_train_normalised,y_train_encoded,test_size = 0.2,random_state = 42 )
|
Digit Recognizer
|
11,484,816 |
group = sales_train.groupby(idx_features ).agg({'item_cnt_day': 'sum',
'item_price': 'mean'})
group = group.reset_index()
group = group.rename(columns={'item_cnt_day': 'item_cnt_month', 'item_price': 'item_price_mean'})
train = train.merge(group, on=idx_features, how='left')
train.head()<set_options>
|
model = Sequential()
model.add(Conv2D(input_shape=(28,28,1),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(Conv2D(filters=32,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Dropout(0.3))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(units=256,activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(units=10, activation="softmax"))
|
Digit Recognizer
|
11,484,816 |
del group
gc.collect() ;<merge>
|
model.compile(loss = 'categorical_crossentropy',optimizer = 'adam',metrics = ['accuracy'])
model.summary()
|
Digit Recognizer
|
11,484,816 |
group = sales_train.groupby(idx_features ).agg({'item_cnt_day': 'count'})
group = group.reset_index()
group = group.rename(columns={'item_cnt_day': 'item_count'})
train = train.merge(group, on=idx_features, how='left')
del group, sales_train
gc.collect()
train.head()<categorify>
|
callbacks = [
EarlyStopping(monitor = 'loss', patience = 6),
ReduceLROnPlateau(monitor = 'loss', patience = 4)
]
|
Digit Recognizer
|
11,484,816 |
test['date_block_num'] = 34
all_data = pd.concat([train, test.drop('ID', axis=1)],
ignore_index=True,
keys=idx_features)
all_data = all_data.fillna(0)
all_data.head()<merge>
|
model.fit(X_train_final,y_train_final,
batch_size = 64,
epochs = 100,
verbose = 1,
validation_data =(X_val,y_val),
callbacks = callbacks )
|
Digit Recognizer
|
11,484,816 |
all_data = all_data.merge(shops, on='shop_id', how='left')
all_data = all_data.merge(items, on='item_id', how='left')
all_data = all_data.merge(item_categories, on='item_category_id', how='left')
all_data = downcast(all_data)
del shops, items, item_categories
gc.collect() ;<prepare_output>
|
score = model.evaluate(X_val,y_val,verbose = 0)
print('The loss on validation set is {0} and the accuracy is {1}'.format(round(score[0],3),round(score[1],3)) )
|
Digit Recognizer
|
11,484,816 |
def resumetable(df):
print(f'Data Shape: {df.shape}')
summary = pd.DataFrame(df.dtypes, columns=['Dtypes'])
summary['Null'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
summary['First_values'] = df.loc[0].values
summary['Second_values'] = df.loc[1].values
summary['Third_values'] = df.loc[2].values
return summary<groupby>
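# Usage sketch (added, assumes the frames loaded above): resumetable(sales_train)
# prints the shape and returns the per-column dtype/null/unique summary.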
|
results = model.predict(X_test_normalised)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name='Label')
|
Digit Recognizer
|
11,484,816 |
def add_mean_features(df, mean_features, idx_features):
    assert idx_features[0] == 'date_block_num' and \
           len(idx_features) in [2, 3]
    if len(idx_features) == 2:
feature_name = idx_features[1] + '_mean_sales'
else:
feature_name = idx_features[1] + '_' + idx_features[2] + '_mean_sales'
group = df.groupby(idx_features ).agg({'item_cnt_month': 'mean'})
group = group.reset_index()
group = group.rename(columns={'item_cnt_month': feature_name})
df = df.merge(group, on=idx_features, how='left')
df = downcast(df, False)
mean_features.append(feature_name)
del group
gc.collect()
return df, mean_features<feature_engineering>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_result.csv",index=False )
|
Digit Recognizer
|
11,088,963 |
item_mean_features = []
all_data, item_mean_features = add_mean_features(df=all_data,
mean_features=item_mean_features,
idx_features=['date_block_num', 'item_id'])
all_data, item_mean_features = add_mean_features(df=all_data,
mean_features=item_mean_features,
idx_features=['date_block_num', 'item_id', 'city'] )<drop_column>
|
print(tf.__version__)
|
Digit Recognizer
|
11,088,963 |
shop_mean_features = []
all_data, shop_mean_features = add_mean_features(df=all_data,
mean_features=shop_mean_features,
idx_features=['date_block_num', 'shop_id', 'item_category_id'] )<feature_engineering>
|
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
print("train data shape: {}".format(train_data.shape))
print("test data shape: {}".format(test_data.shape))
X, Y = train_data.drop(['label'], axis=1), train_data['label']
del train_data
|
Digit Recognizer
|
11,088,963 |
def add_lag_features(df, lag_features_to_clip, idx_features,
lag_feature, nlags=3, clip=False):
    df_temp = df[idx_features + [lag_feature]]
    for i in range(1, nlags+1):
        lag_feature_name = lag_feature + '_lag' + str(i)
        # shift a fresh copy by i blocks so block t is matched with block t-i
        # (shifting the same frame cumulatively would produce lags 1, 3, 6)
        df_shifted = df_temp.copy()
        df_shifted.columns = idx_features + [lag_feature_name]
        df_shifted['date_block_num'] += i
        df = df.merge(df_shifted.drop_duplicates(),
                      on=idx_features,
                      how='left')
df[lag_feature_name] = df[lag_feature_name].fillna(0)
if clip:
lag_features_to_clip.append(lag_feature_name)
df = downcast(df, False)
del df_temp
gc.collect()
return df, lag_features_to_clip<define_variables>
|
X = X/255.0
test_features = test_data/255.0
Y = keras.utils.to_categorical(Y, num_classes=10)
X = X.values.reshape(-1,28,28,1)
test_features = test_features.values.reshape(-1,28,28,1 )
|
Digit Recognizer
|
11,088,963 |
lag_features_to_clip = []
idx_features = ['date_block_num', 'shop_id', 'item_id']
all_data, lag_features_to_clip = add_lag_features(df=all_data,
lag_features_to_clip=lag_features_to_clip,
idx_features=idx_features,
lag_feature='item_cnt_month',
nlags=3,
clip=True )<drop_column>
|
X_train, X_val, y_train, y_val = train_test_split(X, Y, test_size = 0.1, random_state=2 )
|
Digit Recognizer
|
11,088,963 |
all_data, lag_features_to_clip = add_lag_features(df=all_data,
lag_features_to_clip=lag_features_to_clip,
idx_features=idx_features,
lag_feature='item_count',
nlags=3)
all_data, lag_features_to_clip = add_lag_features(df=all_data,
lag_features_to_clip=lag_features_to_clip,
idx_features=idx_features,
lag_feature='item_price_mean',
nlags=3 )<prepare_x_and_y>
|
datagen = keras.preprocessing.image.ImageDataGenerator(rotation_range=15,
width_shift_range=0.1,
zoom_range=0.1,
height_shift_range=0.1)
datagen.fit(X_train )
|
Digit Recognizer
|
11,088,963 |
X_test_temp = all_data[all_data['date_block_num'] == 34]
X_test_temp[item_mean_features].sum()<categorify>
|
input_shape =(28, 28, 1)
model = keras.Sequential()
model.add(keras.layers.Conv2D(filters=32, kernel_size=(5, 5), activation="relu", input_shape=input_shape, padding="Same"))
model.add(keras.layers.Conv2D(filters=32, kernel_size=(5, 5), activation="relu", padding="Same"))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.MaxPooling2D((2, 2)))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="Same"))
model.add(keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="Same"))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.MaxPooling2D((2, 2)))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(10))
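# (added) the final Dense(10) intentionally has no softmax: the compile step in a
# later cell uses CategoricalCrossentropy(from_logits=True), which applies it in the loss.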
model.summary()
|
Digit Recognizer
|
11,088,963 |
for item_mean_feature in item_mean_features:
all_data, lag_features_to_clip = add_lag_features(df=all_data,
lag_features_to_clip=lag_features_to_clip,
idx_features=idx_features,
lag_feature=item_mean_feature,
nlags=3)
all_data = all_data.drop(item_mean_features, axis=1 )<feature_engineering>
|
adam_opt = keras.optimizers.Adam(learning_rate=0.0008, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False)
model.compile(optimizer=adam_opt,
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics = ['accuracy'])
history = model.fit(datagen.flow(X_train, y_train, batch_size=96),
                    steps_per_epoch=len(X_train)//96,
                    epochs=30,
                    validation_data=(X_val, y_val),
                    validation_freq=1,
                    shuffle=True)
|
Digit Recognizer
|
11,088,963 |
<drop_column><EOS>
|
submission = pd.DataFrame()
submission['ImageId'] = pd.Series(range(1,28001))
submission['Label'] = results
submission.to_csv('cnn_submission.csv',index=False )
|
Digit Recognizer
|
11,048,711 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
|
conv = Conv2D(filters=32, kernel_size=3, strides=1,
padding="SAME" )
|
Digit Recognizer
|
11,048,711 |
all_data['item_cnt_month_lag_mean'] = all_data[['item_cnt_month_lag1',
'item_cnt_month_lag2',
'item_cnt_month_lag3']].mean(axis=1 )<feature_engineering>
|
max_pool = MaxPool2D(pool_size=2 )
|
Digit Recognizer
|
11,048,711 |
all_data[lag_features_to_clip + ['item_cnt_month', 'item_cnt_month_lag_mean']] = all_data[lag_features_to_clip +['item_cnt_month', 'item_cnt_month_lag_mean']].clip(0, 20 )<feature_engineering>
|
global_avg_pool = GlobalAvgPool2D()
|
Digit Recognizer
|
11,048,711 |
all_data['lag_grad1'] = all_data['item_cnt_month_lag1']/all_data['item_cnt_month_lag2']
all_data['lag_grad1'] = all_data['lag_grad1'].replace([np.inf, -np.inf],
np.nan ).fillna(0)
all_data['lag_grad2'] = all_data['item_cnt_month_lag2']/all_data['item_cnt_month_lag3']
all_data['lag_grad2'] = all_data['lag_grad2'].replace([np.inf, -np.inf],
np.nan ).fillna(0 )<feature_engineering>
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import ReduceLROnPlateau
from functools import partial
|
Digit Recognizer
|
11,048,711 |
all_data['brand_new'] = all_data['first_sale_date'] == all_data['date_block_num']<drop_column>
|
train = pd.read_csv("../input/digit-recognizer/train.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
print(train.shape)
print(test.shape )
|
Digit Recognizer
|
11,048,711 |
all_data['duration_after_first_sale'] = all_data['date_block_num'] - all_data['first_sale_date']
all_data = all_data.drop('first_sale_date', axis=1 )<feature_engineering>
|
Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1)
X_test = test
Y_train.value_counts()
|
Digit Recognizer
|
11,048,711 |
all_data['month'] = all_data['date_block_num']%12<prepare_x_and_y>
|
X_train = X_train / 255.0
X_test = X_test / 255.0
|
Digit Recognizer
|
11,048,711 |
X_train = all_data[all_data['date_block_num'] < 33]
X_train = X_train.drop(['item_cnt_month'], axis=1)
X_valid = all_data[all_data['date_block_num'] == 33]
X_valid = X_valid.drop(['item_cnt_month'], axis=1)
X_test = all_data[all_data['date_block_num'] == 34]
X_test = X_test.drop(['item_cnt_month'], axis=1)
y_train = all_data[all_data['date_block_num'] < 33]['item_cnt_month']
y_valid = all_data[all_data['date_block_num'] == 33]['item_cnt_month']
del all_data
gc.collect() ;<create_dataframe>
|
print(Y_train[0:5])
Y_train = to_categorical(Y_train, num_classes = 10)
print(Y_train[0:5] )
|
Digit Recognizer
|
11,048,711 |
params = {'metric': 'rmse',
'num_leaves': 255,
'learning_rate': 0.005,
'feature_fraction': 0.75,
'bagging_fraction': 0.75,
'bagging_freq': 5,
'force_col_wise' : True,
'random_state': 10}
cat_features = ['shop_id', 'city', 'item_category_id', 'category', 'month']
dtrain = lgb.Dataset(X_train, y_train)
dvalid = lgb.Dataset(X_valid, y_valid)
lgb_model = lgb.train(params=params,
train_set=dtrain,
num_boost_round=1500,
valid_sets=(dtrain, dvalid),
early_stopping_rounds=150,
categorical_feature=cat_features,
verbose_eval=100 )<save_to_csv>
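# Note (added): in LightGBM >= 4 the early_stopping_rounds/verbose_eval arguments moved
# into callbacks, e.g. callbacks=[lgb.early_stopping(150), lgb.log_evaluation(100)].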
|
random_seed = 2
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.15, random_state=random_seed )
|
Digit Recognizer
|
11,048,711 |
preds = lgb_model.predict(X_test ).clip(0,20)
submission['item_cnt_month'] = preds
submission.to_csv('submission.csv', index=False )<drop_column>
|
DefaultConv2D = partial(Conv2D, kernel_size=3, activation='relu', padding="SAME")
model = Sequential([
DefaultConv2D(filters=32, kernel_size=5, input_shape=[28, 28, 1]),
DefaultConv2D(filters=32, kernel_size=5),
MaxPooling2D(pool_size=2),
DefaultConv2D(filters=64),
DefaultConv2D(filters=64),
MaxPooling2D(pool_size=2),
Flatten() ,
Dense(units=256, activation='relu'),
Dropout(0.5),
Dense(units=10, activation='softmax'),
] )
|
Digit Recognizer
|
11,048,711 |
del X_train, y_train, X_valid, y_valid, X_test, lgb_model, dtrain, dvalid
gc.collect() ;<load_from_csv>
|
model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"] )
|
Digit Recognizer
|
11,048,711 |
sales_train = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv')
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv')
test_ids = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')
sales_train = sales_train.drop(['date'], axis=1)
sales_train.head()<merge>
|
reduce_learning_rate = ReduceLROnPlateau(monitor = 'val_acc', patience = 3, verbose = 1, factor = 0.3, min_lr = 0.00001 )
|
Digit Recognizer
|
11,048,711 |
ids = list(np.arange(test_ids['ID'].max() +1)) *(sales_train['date_block_num'].max() +1)
dates = list(np.arange(sales_train['date_block_num'].max() +1)) *(test_ids['ID'].max() +1)
dates.sort()
date_id_dict = {'ID' : ids, 'date_block_num' : dates}
date_id_df = pd.DataFrame.from_dict(date_id_dict)
date_id_df = date_id_df.merge(test_ids, on='ID')
date_id_df.head()<merge>
|
history = model.fit(X_train, Y_train, batch_size = 100, epochs = 20,validation_data =(X_val,Y_val), callbacks=[reduce_learning_rate] )
|
Digit Recognizer
|
11,048,711 |
grouped = sales_train.groupby(['date_block_num', 'shop_id', 'item_id'], as_index=False)
item_cnt = pd.DataFrame(grouped.sum())
item_cnt = item_cnt.drop(['item_price'], axis=1)
grouped = sales_train.groupby(['shop_id', 'item_id'])
avg_price = pd.DataFrame(grouped.mean() ['item_price'])
monthly_sales = item_cnt.merge(avg_price, on=['shop_id', 'item_id'])
monthly_sales = monthly_sales.merge(test_ids, on=['shop_id', 'item_id'])
monthly_sales.head()<merge>
|
score = model.evaluate(X_val, Y_val, verbose=0)
print('Validation loss:', score[0])
print('Validation accuracy:', score[1] )
|
Digit Recognizer
|
11,048,711 |
item_price = monthly_sales[['item_price', 'ID']]
monthly_sales = monthly_sales.drop(['item_price'], axis=1)
monthly_sales = date_id_df.merge(monthly_sales, how='left', on=['ID', 'date_block_num'])
monthly_sales = monthly_sales.drop(['shop_id_y', 'item_id_y'], axis=1)
monthly_sales['item_cnt_day'].fillna(0, inplace=True)
monthly_sales = monthly_sales.merge(item_price, how='left', on='ID')
monthly_sales['item_price'].fillna(monthly_sales['item_price'].mean(), inplace=True)
monthly_sales = monthly_sales.drop_duplicates()
column_dict = {'shop_id_x' : 'shop_id', 'item_id_x' : 'item_id', 'item_cnt_day' : 'item_cnt_month', 'item_price' : 'avg_price'}
monthly_sales = monthly_sales.rename(columns=column_dict)
monthly_sales.head()<merge>
|
print("[INFO] evaluating network...")
predictions = model.predict(X_val)
print(classification_report(Y_val.argmax(axis=1),predictions.argmax(axis=1)) )
|
Digit Recognizer
|
11,048,711 |
monthly_sales = monthly_sales.merge(items, on='item_id')
monthly_sales = monthly_sales.drop(['item_name'], axis=1)
month = pd.DataFrame([x%12+1 for x in monthly_sales['date_block_num']], columns=['month'])
year = pd.DataFrame([np.floor(x/12)+2013 for x in monthly_sales['date_block_num']], columns=['year'])
monthly_sales = pd.concat([monthly_sales, month, year], axis=1)
monthly_sales = monthly_sales[['ID', 'date_block_num', 'shop_id', 'item_category_id', 'avg_price', 'month', 'year', 'item_cnt_month']]
monthly_sales.head()<merge>
|
results = model.predict(X_test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
11,048,711 |
def calculate_item_cnt_lagged(df, lag):
tmp = df[['date_block_num', 'ID', 'item_cnt_month']]
shifted = tmp.copy()
shifted.columns = ['date_block_num', 'ID', 'item_cnt_lag'+str(lag)]
shifted.date_block_num = shifted.date_block_num + lag
df = pd.merge(df, shifted, on=['date_block_num', 'ID'], how='left')
return df
for lag in range(1, 13):
monthly_sales = calculate_item_cnt_lagged(monthly_sales, lag)
monthly_sales = monthly_sales[monthly_sales['date_block_num'] > 11]
monthly_sales.head()<merge>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist.csv",index=False )
|
Digit Recognizer
|
11,025,326 |
id_price = monthly_sales[['ID', 'avg_price']]
x_test_df = test_ids.merge(items, on='item_id')
x_test_df = x_test_df.merge(id_price, how='left', on='ID')
x_test_df.insert(loc=2, column='month', value=11)
x_test_df.insert(loc=3, column='year', value=2015)
x_test_df.insert(loc=4, column='date_block_num', value=34)
x_test_df = x_test_df.drop_duplicates()
x_test_df['avg_price'].fillna(x_test_df['avg_price'].mean() , inplace=True)
monthly_sales_subset = monthly_sales[['ID', 'date_block_num', 'shop_id', 'item_category_id', 'avg_price', 'month', 'year', 'item_cnt_month']]
x_all_df = pd.concat(( x_test_df, monthly_sales_subset))
for lag in range(1, 13):
x_all_df = calculate_item_cnt_lagged(x_all_df, lag)
x_test_df = x_all_df[x_all_df['date_block_num'] == 34]
x_test_df = x_test_df[['ID', 'shop_id', 'item_category_id', 'avg_price', 'month', 'year',
'item_cnt_lag1', 'item_cnt_lag2', 'item_cnt_lag3', 'item_cnt_lag4',
'item_cnt_lag5', 'item_cnt_lag6', 'item_cnt_lag7', 'item_cnt_lag8',
'item_cnt_lag9', 'item_cnt_lag10', 'item_cnt_lag11', 'item_cnt_lag12']]
x_test_df.head()<import_modules>
|
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold
import tensorflow as tf
from tensorflow.keras import models, layers, optimizers
from tensorflow.keras.utils import to_categorical
|
Digit Recognizer
|
11,025,326 |
from scipy.sparse import hstack, vstack
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OneHotEncoder, normalize<prepare_x_and_y>
|
train_df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test_df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
sub_df = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
train_df.shape, test_df.shape
|
Digit Recognizer
|
11,025,326 |
x_train_price = np.array(monthly_sales['avg_price'] ).reshape(-1, 1)
x_test_price = np.array(x_test_df['avg_price'] ).reshape(-1, 1)
x_train_price = normalize(x_train_price)
x_test_price = normalize(x_test_price)
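# Caution (added): sklearn's normalize() works row-wise by default, so on a single-column
# matrix every non-zero price maps to 1.0; normalize(..., axis=0) or a MinMaxScaler is
# likely what was intended here.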
x_train_lags = np.array(monthly_sales[['item_cnt_lag1', 'item_cnt_lag2', 'item_cnt_lag3', 'item_cnt_lag4',
'item_cnt_lag5', 'item_cnt_lag6', 'item_cnt_lag7', 'item_cnt_lag8',
'item_cnt_lag9', 'item_cnt_lag10', 'item_cnt_lag11', 'item_cnt_lag12']])
x_test_lags = np.array(x_test_df[['item_cnt_lag1', 'item_cnt_lag2', 'item_cnt_lag3', 'item_cnt_lag4',
'item_cnt_lag5', 'item_cnt_lag6', 'item_cnt_lag7', 'item_cnt_lag8',
'item_cnt_lag9', 'item_cnt_lag10', 'item_cnt_lag11', 'item_cnt_lag12']])
x_train_categorical = np.array(monthly_sales[['shop_id', 'item_category_id', 'month', 'year']])
x_test_categorical = np.array(x_test_df[['shop_id', 'item_category_id', 'month', 'year']])
x_all_categorical = np.concatenate(( x_train_categorical, x_test_categorical))
y_train = np.array(monthly_sales['item_cnt_month'])
encoder = OneHotEncoder()
encoder.fit(x_all_categorical)
x_train_categorical = encoder.transform(x_train_categorical)
x_test_categorical = encoder.transform(x_test_categorical)
x_train = hstack([x_train_categorical, x_train_price, x_train_lags])
x_test = hstack([x_test_categorical, x_test_price, x_test_lags])
y_train = np.clip(y_train, 0, 20)
print(x_train.shape)
print(x_test.shape )<compute_train_metric>
|
train_X, test_X = train_test_split(train_df, test_size=0.2, random_state=1)
train_y, test_y = train_X.pop("label"), test_X.pop("label")
train_X, test_X = train_X.values, test_X.values
train_X.shape
|
Digit Recognizer
|
11,025,326 |
gradient_boost = GradientBoostingRegressor(n_estimators=500)
gradient_boost.fit(x_train, y_train)
train_pred = gradient_boost.predict(x_train)
rmse = np.sqrt(mean_squared_error(y_train, train_pred))
print(f"RMSE on training set: {rmse}" )<save_to_csv>
|
train_X = train_X.reshape((train_X.shape[0], 28, 28, 1))
test_X = test_X.reshape((test_X.shape[0], 28, 28, 1))
train_X, test_X = train_X / 255.0, test_X / 255.0
train_y = to_categorical(train_y)
test_y = to_categorical(test_y)
train_X.shape, test_X.shape, train_y.shape, test_y.shape
|
Digit Recognizer
|
11,025,326 |
test_pred = gradient_boost.predict(x_test)
test_pred = x_test_df.assign(item_cnt_month=test_pred)
test_pred = test_pred[['ID', 'item_cnt_month']]
test_pred = test_pred.sort_values(by='ID')
test_pred.to_csv('submission.csv', index=False )<set_options>
|
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32,(5, 5), activation="relu",
kernel_initializer="he_uniform",
kernel_regularizer=tf.keras.regularizers.l2(0.001),
padding="same",
input_shape=(28, 28, 1)) ,
tf.keras.layers.Conv2D(32,(5, 5), activation="relu",
kernel_initializer="he_uniform",
kernel_regularizer=tf.keras.regularizers.l2(0.001),
padding="same"),
tf.keras.layers.MaxPooling2D(( 2, 2)) ,
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(64,(3, 3), activation="relu",
kernel_initializer="he_uniform",
kernel_regularizer=tf.keras.regularizers.l2(0.001),
padding="same"),
tf.keras.layers.Conv2D(64,(3, 3), activation="relu",
kernel_initializer="he_uniform",
kernel_regularizer=tf.keras.regularizers.l2(0.001),
padding="same"),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.MaxPooling2D(( 2, 2)) ,
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(256, activation="relu",
kernel_initializer="uniform"),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation="softmax")
])
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-02),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              metrics=["accuracy"])
history = model.fit(train_X, train_y, epochs=17, validation_split=0.2, verbose=1)
score = model.evaluate(test_X, test_y, verbose=0)
print(f"Test_loss: {score[0]} / Test accuracy: {score[1]}" )
|
Digit Recognizer
|
11,025,326 |
sns.set(style="darkgrid")
rcParams['figure.figsize'] = 12, 4<load_from_csv>
|
n_folds = 10
acc_fold = []
loss_fold = []
inputs = np.concatenate(( train_X, test_X), axis=0)
targets = np.concatenate(( train_y, test_y), axis=0)
|
Digit Recognizer
|
11,025,326 |
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
cats = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv' )<feature_engineering>
|
def evaluate_model(inputs, targets, n_folds=n_folds):
kfold = KFold(n_splits=n_folds, shuffle=True, random_state=2)
fold_no = 1
for train_ix, test_ix in kfold.split(inputs, targets):
print(f"Train for fold {fold_no}...")
history = model.fit(inputs[train_ix], targets[train_ix], epochs=7, verbose=1)
scores = model.evaluate(inputs[test_ix], targets[test_ix], verbose=0)
print(f"Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%")
acc_fold.append(scores[1] * 100)
loss_fold.append(scores[0])
fold_no = fold_no + 1
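# Note (added): the same already-trained model is reused across folds, so each fold's
# held-out split was effectively seen during earlier folds' training; rebuilding the
# model inside the loop would give honest CV scores.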
evaluate_model(inputs, targets)
print("score per fold" )
|
Digit Recognizer
|
11,025,326 |
train =(
train
[
(train['item_price'] > 0)&
(train['item_price'] < 300000)&
(train['item_cnt_day'] < 1000)
]
.reset_index(drop = True)
)
train.loc[train['item_cnt_day'] < 0, 'item_cnt_day'] = 0<feature_engineering>
|
test_df = test_df.values
test_df = test_df.reshape(( test_df.shape[0], 28, 28, 1))
test_df = test_df / 255.0
|
Digit Recognizer
|
11,025,326 |
for i in [(0, 57),(1, 58),(10, 11)]:
train.loc[train['shop_id'] == i[0], 'shop_id'] = i[1]
test.loc[test['shop_id'] == i[0], 'shop_id'] = i[1]<feature_engineering>
|
pred = model.predict(test_df)
pred = [np.argmax(y) for y in pred]
|
Digit Recognizer
|
11,025,326 |
shops.loc[shops['shop_name'] == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"'
shops['city'] = shops.shop_name.str.split(' ' ).map(lambda x: x[0])
shops['category'] = shops.shop_name.str.split(' ' ).map(lambda x: x[1])
shops.loc[shops['city'] == '!Якутск', 'city'] = 'Якутск'<feature_engineering>
|
sub_df["Label"] = pred
sub_df.head()
|
Digit Recognizer
|
11,025,326 |
categories = []
for categ in shops['category'].unique():
    if len(shops[shops['category'] == categ]) > 4:
categories.append(categ)
shops['category'] = shops['category'].apply(lambda x: x if x in categories else 'other' )<categorify>
|
sub_df.to_csv("my_submision1.csv", index=False )
|
Digit Recognizer
|
10,987,682 |
shops['shop_category'] = LabelEncoder().fit_transform(shops['category'])
shops['shop_city'] = LabelEncoder().fit_transform(shops['city'])
shops = shops[['shop_id', 'shop_category', 'shop_city']]
<feature_engineering>
|
train = np.loadtxt('/kaggle/input/digit-recognizer/train.csv', delimiter=',', skiprows=1)
test = np.loadtxt('/kaggle/input/digit-recognizer/test.csv', delimiter=',', skiprows=1 )
|
Digit Recognizer
|
10,987,682 |
cats['type_code'] =(
cats['item_category_name']
.apply(
lambda x: x.split(' ')[0]
)
.astype(str)
)
cats.loc[
(cats['type_code'] == 'Игровые')|
(cats['type_code'] == 'Аксессуары'),
'category'
] = 'Игры'
<feature_engineering>
|
train_label = train[:, 0]
train_img = np.resize(train[:, 1:],(train.shape[0], 28, 28, 1))
test_img = np.resize(test,(test.shape[0], 28, 28, 1))
|
Digit Recognizer
|
10,987,682 |
categories = []
for categ in cats['type_code'].unique():
    if len(cats[cats['type_code'] == categ]) > 4:
categories.append(categ)
cats['type_code'] = cats['type_code'].apply(lambda x: x if x in categories else 'etc' )<categorify>
|
x_train, x_val, y_train, y_val = train_test_split(train_img, train_label, test_size=0.2, random_state=42 )
|
Digit Recognizer
|
10,987,682 |
cats['type_code'] = LabelEncoder().fit_transform(cats['type_code'])
cats['split'] =(
cats['item_category_name']
.apply(lambda x: x.split('-'))
)
cats['subtype'] =(
cats['split']
.apply(
lambda x: x[1].strip() if len(x)>= 2 else x[0].strip()
)
)
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id', 'subtype_code', 'type_code']]<categorify>
|
epochs = 50
batch_size = 128
validation_steps = 10000
|
Digit Recognizer
|
10,987,682 |
def name_correction(x):
x = x.lower()
x = x.partition('[')[0]
x = x.partition('(')[0]
    x = re.sub(r'\W+', ' ', x)
x = x.replace(' ', ' ')
x = x.strip()
return x<feature_engineering>
|
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=64,
kernel_size=(3,3),
padding='same',
activation=partial(tf.nn.leaky_relu, alpha=1e-2),
input_shape=x_train.shape[1:]))
model.add(tf.keras.layers.Conv2D(filters=64,
kernel_size=3,
padding='same',
activation=partial(tf.nn.leaky_relu, alpha=1e-2)))
model.add(tf.keras.layers.Conv2D(filters=64,
kernel_size=(3,3),
padding='same',
activation=partial(tf.nn.leaky_relu, alpha=1e-2)))
model.add(tf.keras.layers.Conv2D(filters=128,
kernel_size=(3,3),
padding='same',
activation=partial(tf.nn.leaky_relu, alpha=1e-2)))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(filters=128,
kernel_size=(3,3),
padding='same',
activation=partial(tf.nn.leaky_relu, alpha=1e-2)))
model.add(tf.keras.layers.Conv2D(filters=192,
kernel_size=(3,3),
padding='same',
activation=partial(tf.nn.leaky_relu, alpha=1e-2)))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Conv2D(filters=192,
kernel_size=(5,5),
padding='same',
activation=partial(tf.nn.leaky_relu, alpha=1e-2)))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation=partial(tf.nn.leaky_relu, alpha=1e-2)))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
|
Digit Recognizer
|
10,987,682 |
items['name1'], items['name2'] = items['item_name'].str.split('[', 1).str
items['name1'], items['name3'] = items['item_name'].str.split('(', 1).str
items['name2'] = items['name2'].str.replace(r'\W+', ' ', regex=True).str.lower()
items['name3'] = items['name3'].str.replace(r'\W+', ' ', regex=True).str.lower()
items = items.fillna('0')
items['item_name'] = items['item_name'].apply(lambda x: name_correction(x))
items['name2'] = items['name2'].apply(lambda x: x[:-1] if x != '0' else '0' )<feature_engineering>
|
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4, epsilon=1e-8),
loss='categorical_crossentropy',
metrics=['accuracy'] )
|
Digit Recognizer
|
10,987,682 |
items['type'] =(
items['name2']
.apply(
lambda x: x[0:8] if x.split(' ')[0] == 'xbox' else x.split(' ')[0]
)
)
items.loc[
(items['type'] == 'x360')|
(items['type'] == 'xbox360')|
(items['type'] == 'xbox 360'),
'type'
] = 'xbox 360'
items.loc[items['type'] == '', 'type'] = 'mac'
items.type =(
items['type']
.apply(
lambda x: x.replace(' ', '')
)
)
items.loc[
(items['type'] == 'pc')|
(items['type'] == 'pс')|
(items['type'] == 'pс'),
'type'
] = 'pс'
items.loc[items['type'] == 'рs3' , 'type'] = 'рs3'<drop_column>
|
data_aug = tf.keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False )
|
Digit Recognizer
|
10,987,682 |
group_sum =(
items
.groupby('type')
.agg({'item_id': 'count'})
.reset_index()
)
drop_cols = []
for categ in group_sum['type'].unique() :
if group_sum.loc[(group_sum['type'] == categ), 'item_id'].values[0] <= 39:
drop_cols.append(categ)
items['name2'] =(
items['name2']
.apply(
lambda x: 'other' if x in drop_cols else x
)
)
items = items.drop(['type'], axis=1 )<categorify>
|
callback = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                                patience=1,
                                                verbose=2,
                                                factor=0.5,
                                                min_lr=1e-7)
|
Digit Recognizer
|
10,987,682 |
items['name2'] = LabelEncoder().fit_transform(items['name2'])
items['name3'] = LabelEncoder().fit_transform(items['name3'])
items.drop(['item_name', 'name1'], axis=1, inplace=True)
<data_type_conversions>
|
y_train_labels = tf.keras.utils.to_categorical(y_train)
y_val_labels = tf.keras.utils.to_categorical(y_val )
|
Digit Recognizer
|
10,987,682 |
matrix = []
cols = ['date_block_num', 'shop_id', 'item_id']
for i in range(34):
sales = train[train['date_block_num'] == i]
matrix.append(
np.array(
list(product(
[i],
sales['shop_id'].unique() ,
sales['item_id'].unique()
)) ,
dtype = np.int16
)
)
matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
matrix = matrix.astype({
'date_block_num': np.int8,
'shop_id': np.int8,
'item_id': np.int16
})
matrix.sort_values(cols, inplace=True )<feature_engineering>
|
train_aug = data_aug.flow(x_train, y_train_labels, batch_size=batch_size)
val_aug = data_aug.flow(x_val, y_val_labels, batch_size=batch_size )
|
Digit Recognizer
|
10,987,682 |
train['revenue'] = train['item_cnt_day'] * train['item_price']<merge>
|
hist = model.fit(train_aug,
steps_per_epoch=x_train.shape[0]//batch_size,
epochs=epochs,
validation_data=val_aug,
validation_steps=validation_steps//batch_size,
callbacks=[callback] )
|
Digit Recognizer
|
10,987,682 |
group =(
train
.groupby(['date_block_num', 'shop_id', 'item_id'])
.agg({
'item_cnt_day': 'sum'
})
)
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] =(
matrix['item_cnt_month']
.fillna(0)
.astype(np.float16)
)<data_type_conversions>
|
y_pred = model.predict(x_val)
y_pred_labels = np.argmax(y_pred, axis=1 )
|
Digit Recognizer
|
10,987,682 |
test['date_block_num'] = 34
test =(
test
.astype({
'date_block_num': np.int8,
'shop_id': np.int8,
'item_id': np.int16
})
)<concatenate>
|
print(classification_report(y_val, y_pred_labels))
|
Digit Recognizer
|
10,987,682 |
matrix = pd.concat(
[matrix, test.drop(['ID'], axis=1)],
ignore_index=True, sort=False, keys=cols
)
matrix.fillna(0, inplace=True )<merge>
|
y_pred_test = model.predict(test_img)
y_pred_test_labels = np.argmax(y_pred_test, axis=1 )
|
Digit Recognizer
|
10,987,682 |
matrix = pd.merge(matrix, shops, on='shop_id', how='left')
matrix = pd.merge(matrix, items, on='item_id', how='left')
matrix = pd.merge(matrix, cats, on='item_category_id', how='left')
matrix =(
matrix
.astype({
'shop_city': np.int8,
'shop_category': np.int8,
'item_category_id': np.int8,
'subtype_code': np.int8,
'name2': np.int8,
'name3': np.int16,
'type_code': np.int8
})
)<merge>
|
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
submission['Label'] = y_pred_test_labels.astype(int)
submission.head(10 )
|
Digit Recognizer
|
10,987,682 |
def lag_feature(df, lags, cols):
for col in cols:
tmp = df[['date_block_num', 'shop_id', 'item_id', col]]
for i in lags:
shifted = tmp.copy()
shifted.columns = ['date_block_num', 'shop_id', 'item_id', col + "_lag_" + str(i)]
shifted['date_block_num'] = shifted['date_block_num'] + i
df = pd.merge(df, shifted, on=['date_block_num', 'shop_id', 'item_id'], how='left')
return df<concatenate>
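# Worked example (added): lag_feature(matrix, [1, 2, 3], ['item_cnt_month']) adds
# item_cnt_month_lag_1/2/3, each holding the same series shifted 1, 2 or 3 blocks
# back (unmatched rows are left as NaN here).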
|
submission.to_csv('submission.csv', index=False, header=True )
|
Digit Recognizer
|
10,939,062 |
matrix = lag_feature(matrix, [1, 2, 3], ['item_cnt_month'] )<merge>
|
import matplotlib.pyplot as plt
from collections import Counter
|
Digit Recognizer
|
10,939,062 |
group =(
matrix
.groupby('date_block_num')
.agg({
'item_cnt_month' : 'mean'
})
)
group.columns = ['date_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on='date_block_num', how="left")
matrix['date_avg_item_cnt'] = matrix['date_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], ['date_avg_item_cnt'])
matrix.drop(['date_avg_item_cnt'], axis=1, inplace=True )<merge>
|
fg_unet = pd.read_csv("/kaggle/input/mnist-w-fgunet-output/submission.csv")
vgg = pd.read_csv("/kaggle/input/mnist-w-vgg16-output/submission.csv")
resnet = pd.read_csv("/kaggle/input/mnist-w-resnet-output/submission.csv" )
|
Digit Recognizer
|
10,939,062 |
group =(
matrix
.groupby(['date_block_num', 'item_id'])
.agg({
'item_cnt_month': 'mean'
})
)
group.columns = ['date_item_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1, 2, 3], ['date_item_avg_item_cnt'])
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True )<merge>
|
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
sub = pd.read_csv("/kaggle/input/mnist-w-resnet-output/submission.csv" )
|
Digit Recognizer
|
10,939,062 |
group =(
matrix
.groupby(['date_block_num', 'shop_id'])
.agg({
'item_cnt_month': 'mean'
})
)
group.columns = ['date_shop_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id'], how='left')
matrix['date_shop_avg_item_cnt'] = matrix['date_shop_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1, 2, 3], ['date_shop_avg_item_cnt'])
matrix.drop(['date_shop_avg_item_cnt'], axis=1, inplace=True )<merge>
|
majority = sub.copy()
no_maj = []
unequal = []
for i in range(len(vgg)):
    lst = [fg_unet.iloc[i].Label, vgg.iloc[i].Label, resnet.iloc[i].Label]
    if not all(ele == lst[0] for ele in lst):
        unequal.append(i)
        count = Counter(lst).most_common()
        if len(count) == len(lst):
            # three-way tie: no majority, fall back to the ResNet prediction
            no_maj.append(i)
            majority.at[i, 'Label'] = lst[-1]
            img = test.iloc[i, :].values
            img = img.reshape(28, 28)
            print(i, lst)
            plt.imshow(img)
            plt.show()
        else:
            majority.at[i, 'Label'] = count[0][0]
    else:
        majority.at[i, 'Label'] = lst[0]
print("Number of rows not all equal: ", len(unequal))
print("Number of rows with no majority: ", len(no_maj))
majority.to_csv("submission_maj.csv", index=False)
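# Hedged alternative (an addition): the same majority vote, vectorized with
# scipy (requires scipy >= 1.9 for keepdims); three-way ties fall back to ResNet.
from scipy import stats

preds = np.stack([fg_unet['Label'].values, vgg['Label'].values, resnet['Label'].values], axis=1)
mode, count = stats.mode(preds, axis=1, keepdims=False)
majority_vec = sub.copy()
majority_vec['Label'] = np.where(count >= 2, mode, preds[:, -1])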
|
Digit Recognizer
|
10,939,062 |
group =(
matrix
.groupby(['date_block_num', 'shop_id', 'item_id'])
.agg({
'item_cnt_month': 'mean'
})
)
group.columns = ['date_shop_item_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'item_id'], how='left')
matrix['date_shop_item_avg_item_cnt'] = matrix['date_shop_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1, 2, 3], ['date_shop_item_avg_item_cnt'])
matrix.drop(['date_shop_item_avg_item_cnt'], axis=1, inplace=True )<merge>
|
from keras.models import load_model
from keras.utils import to_categorical
|
Digit Recognizer
|
10,939,062 |
group =(
matrix
.groupby(['date_block_num', 'shop_id', 'subtype_code'])
.agg({
'item_cnt_month': 'mean'
})
)
group.columns = ['date_shop_subtype_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'subtype_code'], how='left')
matrix['date_shop_subtype_avg_item_cnt'] = matrix['date_shop_subtype_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], ['date_shop_subtype_avg_item_cnt'])
matrix.drop(['date_shop_subtype_avg_item_cnt'], axis=1, inplace=True )<data_type_conversions>
|
fg_unet = load_model("/kaggle/input/mnist-w-fgunet-output/best_model.h5")
vgg = load_model("/kaggle/input/mnist-w-vgg16-output/best_model.h5")
resnet = load_model("/kaggle/input/mnist-w-resnet-output/best_model.h5" )
|
Digit Recognizer
|
10,939,062 |
group =(
matrix
.groupby(['date_block_num', 'shop_city'])
.agg({
'item_cnt_month': 'mean'
})
)
group.columns = ['date_city_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_city'], how='left')
matrix['date_city_avg_item_cnt'] = matrix['date_city_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], ['date_city_avg_item_cnt'])
matrix.drop(['date_city_avg_item_cnt'], axis=1, inplace=True )<merge>
|
X = [train.iloc[i, 1:].values for i in range(len(train))]
X = [x.reshape(28, 28) for x in X]
X_28 = [x.reshape(28, 28, 1, 1) for x in X]  # layout consumed by the fg_unet model below
X_28 = np.array(X_28)
X = [np.pad(x, 2) for x in X]  # pad 28x28 -> 32x32 for the VGG/ResNet inputs
X = np.array(X)
X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
X = np.repeat(X, 3, axis=-1)  # replicate grayscale into 3 channels
|
Digit Recognizer
|
10,939,062 |
group =(
matrix
.groupby(['date_block_num', 'item_id', 'shop_city'])
.agg({
'item_cnt_month': 'mean'
})
)
group.columns = ['date_item_city_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'item_id', 'shop_city'], how='left')
matrix['date_item_city_avg_item_cnt'] = matrix['date_item_city_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], ['date_item_city_avg_item_cnt'])
matrix.drop(['date_item_city_avg_item_cnt'], axis=1, inplace=True )<merge>
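# All of the mean-encoding cells above repeat one pattern; a hedged helper
# (the function name is mine, not the original author's):
def add_mean_enc(df, keys, lags, name):
    g = df.groupby(keys).agg({'item_cnt_month': 'mean'})
    g.columns = [name]
    g.reset_index(inplace=True)
    df = pd.merge(df, g, on=keys, how='left')
    df[name] = df[name].astype(np.float16)
    df = lag_feature(df, lags, [name])
    return df.drop([name], axis=1)

# e.g. matrix = add_mean_enc(matrix, ['date_block_num', 'item_id', 'shop_city'], [1], 'date_item_city_avg_item_cnt')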
|
# same preprocessing for the test images
X_test = [test.iloc[i, :].values for i in range(len(test))]
X_test = [x.reshape(28, 28) for x in X_test]
X_test_28 = [x.reshape(28, 28, 1, 1) for x in X_test]
X_test_28 = np.array(X_test_28)
X_test = [np.pad(x, 2) for x in X_test]
X_test = np.array(X_test)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)
X_test = np.repeat(X_test, 3, axis=-1)
|
Digit Recognizer
|
10,939,062 |
group =(
train
.groupby('item_id')
.agg({
'item_price': 'mean'
})
)
group.columns = ['item_avg_item_price']
group.reset_index(inplace=True)
matrix = matrix.merge(group, on='item_id', how='left')
matrix['item_avg_item_price'] = matrix['item_avg_item_price'].astype(np.float16)
group =(
train
.groupby(['date_block_num', 'item_id'])
.agg({
'item_price': 'mean'
})
)
group.columns = ['date_item_avg_item_price']
group.reset_index(inplace=True)
matrix = matrix.merge(group, on=['date_block_num', 'item_id'], how='left')
matrix['date_item_avg_item_price'] = matrix['date_item_avg_item_price'].astype(np.float16)
lags = [1, 2, 3]
matrix = lag_feature(matrix, lags, ['date_item_avg_item_price'])
for i in lags:
matrix['delta_price_lag_' + str(i)] =(
matrix['date_item_avg_item_price_lag_' + str(i)] -\
matrix['item_avg_item_price']
)/ matrix['item_avg_item_price']
def select_trends(row):
    # return the first non-zero lagged price delta; note that NaN is truthy
    # in Python, so rows with a missing lag_1 return NaN here and are zeroed
    # by the fillna(0) below
    for i in lags:
        if row['delta_price_lag_' + str(i)]:
            return row['delta_price_lag_' + str(i)]
    return 0
matrix['delta_price_lag_'] = matrix.apply(select_trends, axis=1)
matrix['delta_price_lag_'] = matrix['delta_price_lag_'].astype(np.float16)
matrix['delta_price_lag_'].fillna(0, inplace=True)
features_to_drop = ['item_avg_item_price', 'date_item_avg_item_price']
for i in lags:
features_to_drop.append('date_item_avg_item_price_lag_' + str(i))
features_to_drop.append('delta_price_lag_' + str(i))
matrix.drop(features_to_drop, axis=1, inplace=True )<merge>
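# Hedged aside: the row-wise .apply above is slow at this scale. An
# approximately equivalent vectorized form (first non-zero, non-NaN lag),
# usable in place of select_trends before the lag columns are dropped:
# lag_cols = ['delta_price_lag_' + str(i) for i in lags]
# first_lag = matrix[lag_cols].replace(0, np.nan).bfill(axis=1).iloc[:, 0]
# matrix['delta_price_lag_'] = first_lag.fillna(0).astype(np.float16)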
|
f_y_train = fg_unet.predict(X_28, verbose=1)
v_y_train = vgg.predict(X, verbose=1)
r_y_train = resnet.predict(X, verbose=1 )
|
Digit Recognizer
|
10,939,062 |
group =(
train
.groupby(['date_block_num', 'shop_id'])
.agg({
'revenue': 'sum'
})
)
group.columns = ['date_shop_revenue']
group.reset_index(inplace=True)
matrix = matrix.merge(group, on=['date_block_num', 'shop_id'], how='left')
matrix['date_shop_revenue'] = matrix['date_shop_revenue'].astype(np.float32)
group = (
    group
    .groupby('shop_id')
    .agg({
        'date_shop_revenue': 'mean'
    })
)
group.columns = ['shop_avg_revenue']
group.reset_index(inplace=True)
matrix = matrix.merge(group, on='shop_id', how='left')
matrix['shop_avg_revenue'] = matrix['shop_avg_revenue'].astype(np.float32)
matrix['delta_revenue'] =(
matrix['date_shop_revenue'] - matrix['shop_avg_revenue']
)/ matrix['shop_avg_revenue']
matrix['delta_revenue'] = matrix['delta_revenue'].astype(np.float32)
matrix = lag_feature(matrix, [1], ['delta_revenue'])
matrix['delta_revenue_lag_1'] = matrix['delta_revenue_lag_1'].astype(np.float32)
matrix.drop(
['date_shop_revenue', 'shop_avg_revenue', 'delta_revenue'],
axis=1, inplace=True
)<categorify>
|
f_y_test = fg_unet.predict(X_test_28, verbose=1)
v_y_test = vgg.predict(X_test, verbose=1)
r_y_test = resnet.predict(X_test, verbose=1 )
|
Digit Recognizer
|
10,939,062 |
matrix['month'] = matrix['date_block_num'] % 12
# days per month; February = 28 is exact here, since no leap year falls
# inside the Jan 2013 - Nov 2015 window
days = pd.Series([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
matrix['days'] = matrix['month'].map(days).astype(np.int8)<categorify>
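# Hedged alternative deriving the same values from the standard library
# (assumes month 0 = January 2013, as in this competition):
import calendar
month_days = {b: calendar.monthrange(2013 + b // 12, b % 12 + 1)[1] for b in range(35)}
matrix['days'] = matrix['date_block_num'].map(month_days).astype(np.int8)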
|
n_classes = 10
y = [train.iloc[i,0] for i in range(len(train)) ]
y = np.array(y)
print(np.unique(y, return_counts=True))
y = to_categorical(y, num_classes=n_classes)
y.shape
|
Digit Recognizer
|
10,939,062 |
# months since the item's first sale in this particular shop
matrix['item_shop_first_sale'] = (
    matrix['date_block_num'] - matrix.groupby(['item_id', 'shop_id'])['date_block_num'].transform('min')
)
# months since the item's first sale in any shop
matrix['item_first_sale'] = (
    matrix['date_block_num'] - matrix.groupby(['item_id'])['date_block_num'].transform('min')
)<drop_column>
|
from sklearn.linear_model import Ridge

# stack the three models' class probabilities and fit a linear blender
X = np.hstack([f_y_train, v_y_train, r_y_train])
rid = Ridge()
rid.fit(X, y)
|
Digit Recognizer
|
10,939,062 |
# drop the earliest months, which lack full lag history
matrix = matrix[matrix['date_block_num'] >= 4]
matrix.head().T<create_dataframe>
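# Typical next step (a hedged sketch; not shown in this excerpt): split by
# month, validating on block 33 and predicting block 34.
X_train = matrix[matrix['date_block_num'] < 33].drop(['item_cnt_month'], axis=1)
y_train = matrix[matrix['date_block_num'] < 33]['item_cnt_month']
X_valid = matrix[matrix['date_block_num'] == 33].drop(['item_cnt_month'], axis=1)
y_valid = matrix[matrix['date_block_num'] == 33]['item_cnt_month']
X_test = matrix[matrix['date_block_num'] == 34].drop(['item_cnt_month'], axis=1)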
|
X_pred = np.hstack([f_y_test, v_y_test, r_y_test] )
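# Hedged continuation (a sketch; not part of the original cell): blend the
# stacked test probabilities, take the argmax, and write the submission.
y_blend = rid.predict(X_pred)
sub['Label'] = np.argmax(y_blend, axis=1)
sub.to_csv("submission.csv", index=False)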
|
Digit Recognizer
|