kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57) |
---|---|---|---|
12,294,222 |
for i in range(0, num):
    X_train_np = trains[i][features].values.astype(np.float32)
    X_valid_np = valids[i][features].values.astype(np.float32)
    tr_data = lgb.Dataset(X_train_np, label=trains[i][target], feature_name=list(features))
    va_data = lgb.Dataset(X_valid_np, label=valids[i][target], feature_name=list(features))
    del X_train_np
    del X_valid_np
    gc.collect()
    model = lgb.train(
        params,
        tr_data,
        num_boost_round=5000,
        valid_sets=[tr_data, va_data],
        early_stopping_rounds=50,
        feature_name=features,
        categorical_feature=categorical_columns,
        verbose_eval=50
    )
    clfs.append(model)
    fig, ax = plt.subplots(figsize=(15, 15))
    lgb.plot_importance(model, ax=ax, importance_type='gain', max_num_features=50)
    plt.show()
    del tr_data
    del va_data
    gc.collect()
# Free the per-fold frames only after all folds are trained; deleting them inside
# the loop would raise a NameError on the next iteration.
del trains
del valids
gc.collect()
<choose_model_class>
|
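# One-hot encode the digit labels, e.g. label 3 becomes [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].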
y = to_categorical(y, num_classes = 10)
y[0]
|
Digit Recognizer
|
12,294,222 |
MAX_SEQ = 100
class FFN(nn.Module):
def __init__(self, state_size=200):
super(FFN, self ).__init__()
self.state_size = state_size
self.lr1 = nn.Linear(state_size, state_size)
self.relu = nn.ReLU()
self.lr2 = nn.Linear(state_size, state_size)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = self.lr1(x)
x = self.relu(x)
x = self.lr2(x)
return self.dropout(x)
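# Causal attention mask: an upper-triangular boolean matrix (k=1) marks "future"
# positions so each timestep can only attend to earlier interactions.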
def future_mask(seq_length):
future_mask = np.triu(np.ones(( seq_length, seq_length)) , k=1 ).astype('bool')
return torch.from_numpy(future_mask)
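# Illustrative output: future_mask(3) ->
# tensor([[False,  True,  True],
#         [False, False,  True],
#         [False, False, False]])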
class SAKTModel(nn.Module):
def __init__(self, n_skill, max_seq=MAX_SEQ, embed_dim=128):
super(SAKTModel, self ).__init__()
self.n_skill = n_skill
self.embed_dim = embed_dim
self.embedding = nn.Embedding(2*n_skill+1, embed_dim)
self.pos_embedding = nn.Embedding(max_seq-1, embed_dim)
self.e_embedding = nn.Embedding(n_skill+1, embed_dim)
self.multi_att = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=8, dropout=0.2)
self.dropout = nn.Dropout(0.2)
self.layer_normal = nn.LayerNorm(embed_dim)
self.ffn = FFN(embed_dim)
self.pred = nn.Linear(embed_dim, 1)
def forward(self, x, question_ids):
device = x.device
x = self.embedding(x)
pos_id = torch.arange(x.size(1)).unsqueeze(0 ).to(device)
pos_x = self.pos_embedding(pos_id)
x = x + pos_x
e = self.e_embedding(question_ids)
x = x.permute(1, 0, 2)
e = e.permute(1, 0, 2)
att_mask = future_mask(x.size(0)).to(device)
att_output, att_weight = self.multi_att(e, x, x, attn_mask=att_mask)
att_output = self.layer_normal(att_output + e)
att_output = att_output.permute(1, 0, 2)
x = self.ffn(att_output)
x = self.layer_normal(x + att_output)
x = self.pred(x)
return x.squeeze(-1), att_weight
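# Load the saved skill list and per-user interaction groups, rebuild the SAKT model,
# and restore its trained weights (falling back to map_location='cpu' when no GPU is available).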
skills = joblib.load("/kaggle/input/riiid-sakt-model-dataset-public/skills.pkl.zip")
n_skill = len(skills)
group = joblib.load("/kaggle/input/riiid-sakt-model-dataset-public/group.pkl.zip")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
nn_model = SAKTModel(n_skill, embed_dim=128)
try:
nn_model.load_state_dict(torch.load("/kaggle/input/riiid-sakt-model-dataset-public/sakt_model.pt"))
except:
nn_model.load_state_dict(torch.load("/kaggle/input/riiid-sakt-model-dataset-public/sakt_model.pt", map_location='cpu'))
nn_model.to(device)
nn_model.eval()
<data_type_conversions>
|
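# Reshape the flat 784-pixel rows into 28x28x1 image tensors expected by the CNN.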
train = train.values.reshape(train.shape[0], 28, 28, 1)
test = test.values.reshape(test.shape[0], 28, 28, 1)
print('Reshaped Train set: ',train.shape, " & Reshaped Test Set", test.shape )
|
Digit Recognizer
|
12,294,222 |
user_sum_dict = user_agg['sum'].astype('int16' ).to_dict(defaultdict(int))
user_count_dict = user_agg['count'].astype('int16' ).to_dict(defaultdict(int))
<data_type_conversions>
|
train = train.astype("float32")/255.0
test = test.astype("float32")/255.0
|
Digit Recognizer
|
12,294,222 |
del user_agg
gc.collect()
task_container_sum_dict = task_container_agg['sum'].astype('int32' ).to_dict(defaultdict(int))
task_container_count_dict = task_container_agg['count'].astype('int32' ).to_dict(defaultdict(int))
task_container_std_dict = task_container_agg['var'].astype('float16' ).to_dict(defaultdict(int))
explanation_sum_dict = explanation_agg['sum'].astype('int16' ).to_dict(defaultdict(int))
explanation_count_dict = explanation_agg['count'].astype('int16' ).to_dict(defaultdict(int))
del task_container_agg
del explanation_agg
gc.collect()<data_type_conversions>
|
X_train, X_val, y_train, y_val = train_test_split(train, y, test_size=0.25, random_state=0)
print("Number of samples in Training set :", X_train.shape[0])
print("Number of samples in Validation set :", X_val.shape[0] )
|
Digit Recognizer
|
12,294,222 |
user_lecture_sum_dict = user_lecture_agg['sum'].astype('int16' ).to_dict(defaultdict(int))
user_lecture_count_dict = user_lecture_agg['count'].astype('int16' ).to_dict(defaultdict(int))
del user_lecture_agg
gc.collect()<categorify>
|
train_datagen = ImageDataGenerator(rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1
)
training_set = train_datagen.flow(X_train, y_train,
batch_size=64
)
val_datagen = ImageDataGenerator()
val_set = val_datagen.flow(X_val, y_val,
batch_size=64
)
|
Digit Recognizer
|
12,294,222 |
max_timestamp_u_dict=max_timestamp_u.set_index('user_id' ).to_dict()
max_timestamp_u_dict2=max_timestamp_u2.set_index('user_id' ).to_dict()
max_timestamp_u_dict3=max_timestamp_u3.set_index('user_id' ).to_dict()
user_prior_question_elapsed_time_dict=user_prior_question_elapsed_time.set_index('user_id' ).to_dict()
del max_timestamp_u
del max_timestamp_u2
del max_timestamp_u3
del user_prior_question_elapsed_time
gc.collect()<categorify>
|
model = tf.keras.models.Sequential()
model.add(Conv2D(64, kernel_size=(5,5), padding='same', activation='relu', input_shape=(28,28,1)))
model.add(Conv2D(64, kernel_size=(5,5), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128, kernel_size=(3,3), activation='relu', padding='same'))
model.add(Conv2D(128, kernel_size=(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy']
)
model.summary()
|
Digit Recognizer
|
12,294,222 |
attempt_no_sum_dict = attempt_no_agg['sum'].to_dict(defaultdict(int))
del attempt_no_agg
gc.collect()<feature_engineering>
|
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.2,
patience=4,
verbose=1,
min_delta=0.0001 )
|
Digit Recognizer
|
12,294,222 |
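# Running attempt counter: returns how many times this (user_id, content_id) pair has
# been seen so far, incrementing the stored count as a side effect.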
def get_max_attempt(user_id,content_id):
k =(user_id,content_id)
if k in attempt_no_sum_dict.keys() :
attempt_no_sum_dict[k]+=1
return attempt_no_sum_dict[k]
attempt_no_sum_dict[k] = 1
return attempt_no_sum_dict[k]<feature_engineering>
|
steps_per_epoch = training_set.n // training_set.batch_size
validation_steps = val_set.n // val_set.batch_size
hist = model.fit(x=training_set,
validation_data=val_set,
epochs=35,
callbacks=[reduce_lr],
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps
)
|
Digit Recognizer
|
12,294,222 |
<define_variables>
|
_, acc_val = model.evaluate(val_set)
_, acc_tr = model.evaluate(training_set)
print("\nFinal accuracy on training set: {:.2f}% & accuracy on validation set: {:.2f}%".format(acc_tr*100, acc_val*100))
|
Digit Recognizer
|
12,294,222 |
iter_test = env.iter_test()
prior_test_df = None
prev_test_df1 = None<define_search_space>
|
pred = model.predict(test)
res = np.argmax(pred, axis=1)
submission = pd.DataFrame({"ImageId":[i+1 for i in range(len(test)) ],
"Label": res})
submission.head(10 )
|
Digit Recognizer
|
12,294,222 |
N=[0.4,0.6]<feature_engineering>
|
submission.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
12,294,222 |
<import_modules><EOS>
|
submission.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
12,110,917 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<import_modules>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
|
Digit Recognizer
|
12,110,917 |
class Predictor(object):
def __init__(self, model, model_metadata, ohe_categorical_index_vocab,
mhe_categorical_index_vocab):
self._model = model
self._model_metadata = model_metadata
self._ohe_categorical_index_vocab = ohe_categorical_index_vocab
self._mhe_categorical_index_vocab = mhe_categorical_index_vocab
self._model_type = None
self._label_col = None
self._feature_name_to_index_map = {}
self._feature_names = []
self._class_names = []
def _extract_model_metadata(self):
if 'model_type' not in self._model_metadata or self._model_metadata[
'model_type'] not in [
'boosted_tree_regressor', 'boosted_tree_classifier'
]:
raise ValueError('Invalid model_type in model_metadata')
self._model_type = self._model_metadata['model_type']
if 'label_col' not in self._model_metadata:
raise ValueError('label_col not found in model_metadata')
self._label_col = self._model_metadata['label_col']
if not self._model_metadata['features']:
raise ValueError('No feature found in model_metadata')
self._feature_names = self._model_metadata['feature_names']
if self._model_type == 'boosted_tree_classifier':
if 'class_names' not in self._model_metadata or not self._model_metadata[
'class_names']:
raise ValueError('No class_names found in model_metadata')
self._class_names = self._model_metadata['class_names']
for feature_index in range(len(self._feature_names)) :
feature_name = self._feature_names[feature_index]
self._feature_name_to_index_map[feature_name] = feature_index
feature_metadata = self._model_metadata['features'][feature_name]
if 'encode_type' not in feature_metadata or not feature_metadata[
'encode_type']:
continue
elif feature_metadata['encode_type'] == 'ohe':
if feature_index not in self._ohe_categorical_index_vocab:
raise ValueError(
'feature_index %d missing in _ohe_categorical_index_vocab' %
feature_index)
elif feature_metadata['encode_type'] == 'mhe':
if feature_index not in self._mhe_categorical_index_vocab:
raise ValueError(
'feature_index %d missing in _mhe_categorical_index_vocab' %
feature_index)
else:
raise ValueError('Invalid encode_type %s for feature %s' %
(feature_metadata['encode_type'], feature_name))
def _preprocess(self, data):
self._extract_model_metadata()
preprocessed_data = []
for row_index in range(len(data)) :
row = data[row_index]
sorted_data_feature_names = sorted(row.keys())
sorted_model_feature_names = sorted(self._feature_names)
if sorted_data_feature_names != sorted_model_feature_names:
raise ValueError(
'Row %d has different features %s than the model features %s' %
(row_index, ','.join(sorted_data_feature_names),
','.join(sorted_model_feature_names)))
encoded_row = []
for feature_name in self._feature_names:
col = row[feature_name]
feature_index = self._feature_name_to_index_map[feature_name]
if feature_index in self._ohe_categorical_index_vocab:
vocab = self._ohe_categorical_index_vocab[feature_index]
col_value = str(col)
if col_value in vocab:
encoded_row.append(float(vocab.index(col_value)))
else:
encoded_row.append(None)
elif feature_index in self._mhe_categorical_index_vocab:
vocab = self._mhe_categorical_index_vocab[feature_index]
mhe_list = [0.0] * len(vocab)
try:
for item in col:
item_value = str(item)
if item_value in vocab:
mhe_list[vocab.index(item_value)] = 1.0
encoded_row.extend(mhe_list)
except ValueError:
raise ValueError('The feature %s in row %d is not an array' %
(feature_name, row_index))
else:
try:
encoded_row.append(float(col))
except ValueError:
raise ValueError(
'The feature %s in row %d cannot be converted to float' %
(feature_name, row_index))
preprocessed_data.append(encoded_row)
return preprocessed_data
def predict(self, instances, **kwargs):
del kwargs
encoded = self._preprocess(instances)
prediction_input = xgb.DMatrix(
np.array(encoded ).reshape(( len(instances), -1)) , missing=None)
if self._model_type == 'boosted_tree_classifier':
outputs = self._model.predict(prediction_input)
final_outputs = []
for np_output in outputs:
output = np_output.tolist()
final_output = {}
final_output['predicted_{}'.format(
self._label_col)] = self._class_names[output.index(max(output)) ]
final_output['{}_values'.format(self._label_col)] = self._class_names
final_output['{}_probs'.format(self._label_col)] = output
final_outputs.append(final_output)
return final_outputs
else:
return {
'predicted_' + self._label_col:
self._model.predict(prediction_input ).tolist()
}
@classmethod
def from_path(cls, model_dir, model_name="model.bst", meta_name="model_metadata.json"):
model_path = os.path.join(model_dir, model_name)
model = xgb.Booster(model_file=model_path)
assets_path = model_dir
model_metadata_path = os.path.join(assets_path, meta_name)
with open(model_metadata_path)as f:
model_metadata = json.load(f)
txt_list = glob.glob(assets_path + '/*.txt')
ohe_categorical_index_vocab = {}
mhe_categorical_index_vocab = {}
for txt_file in txt_list:
ohe_feature_found = re.search(r'(\d+ ).txt', txt_file)
mhe_feature_found = re.search(r'(\d+)_array.txt', txt_file)
if ohe_feature_found:
feature_index = int(ohe_feature_found.group(1))
with open(txt_file)as f:
ohe_categorical_index_vocab[feature_index] = f.read().splitlines()
elif mhe_feature_found:
feature_index = int(mhe_feature_found.group(1))
with open(txt_file)as f:
mhe_categorical_index_vocab[feature_index] = f.read().splitlines()
return cls(model, model_metadata, ohe_categorical_index_vocab,
mhe_categorical_index_vocab )<feature_engineering>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
12,110,917 |
class Iter_Valid(object):
def __init__(self, df, max_user=1000):
df = df.reset_index(drop=True)
self.df = df
self.user_answer = df['user_answer'].astype(str ).values
self.answered_correctly = df['answered_correctly'].astype(str ).values
df['prior_group_responses'] = "[]"
df['prior_group_answers_correct'] = "[]"
self.sample_df = df[df['content_type_id'] == 0][['row_id']]
self.sample_df['answered_correctly'] = 0
self.len = len(df)
self.user_id = df.user_id.values
self.task_container_id = df.task_container_id.values
self.content_type_id = df.content_type_id.values
self.max_user = max_user
self.current = 0
self.pre_user_answer_list = []
self.pre_answered_correctly_list = []
def __iter__(self):
return self
def fix_df(self, user_answer_list, answered_correctly_list, pre_start):
df= self.df[pre_start:self.current].copy()
sample_df = self.sample_df[pre_start:self.current].copy()
df.loc[pre_start,'prior_group_responses'] = '[' + ",".join(self.pre_user_answer_list)+ ']'
df.loc[pre_start,'prior_group_answers_correct'] = '[' + ",".join(self.pre_answered_correctly_list)+ ']'
self.pre_user_answer_list = user_answer_list
self.pre_answered_correctly_list = answered_correctly_list
return df, sample_df
def __next__(self):
added_user = set()
pre_start = self.current
pre_added_user = -1
pre_task_container_id = -1
pre_content_type_id = -1
user_answer_list = []
answered_correctly_list = []
while self.current < self.len:
crr_user_id = self.user_id[self.current]
crr_task_container_id = self.task_container_id[self.current]
crr_content_type_id = self.content_type_id[self.current]
if crr_user_id in added_user and(crr_user_id != pre_added_user or(crr_task_container_id != pre_task_container_id and crr_content_type_id == 0 and pre_content_type_id == 0)) :
return self.fix_df(user_answer_list, answered_correctly_list, pre_start)
if len(added_user)== self.max_user:
if crr_user_id == pre_added_user and(crr_task_container_id == pre_task_container_id or crr_content_type_id == 1):
user_answer_list.append(self.user_answer[self.current])
answered_correctly_list.append(self.answered_correctly[self.current])
self.current += 1
continue
else:
return self.fix_df(user_answer_list, answered_correctly_list, pre_start)
added_user.add(crr_user_id)
pre_added_user = crr_user_id
pre_task_container_id = crr_task_container_id
pre_content_type_id = crr_content_type_id
user_answer_list.append(self.user_answer[self.current])
answered_correctly_list.append(self.answered_correctly[self.current])
self.current += 1
if pre_start < self.current:
return self.fix_df(user_answer_list, answered_correctly_list, pre_start)
else:
raise StopIteration()<choose_model_class>
|
X_train = train.iloc[:,1:]
y_train = train.iloc[:,0]
|
Digit Recognizer
|
12,110,917 |
MAX_SEQ = 240
ACCEPTED_USER_CONTENT_SIZE = 2
EMBED_SIZE = 256
BATCH_SIZE = 64+32
DROPOUT = 0.1
class FFN(nn.Module):
def __init__(self, state_size = 200, forward_expansion = 1, bn_size = MAX_SEQ - 1, dropout=0.2):
super(FFN, self ).__init__()
self.state_size = state_size
self.lr1 = nn.Linear(state_size, forward_expansion * state_size)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm1d(bn_size)
self.lr2 = nn.Linear(forward_expansion * state_size, state_size)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.relu(self.lr1(x))
x = self.bn(x)
x = self.lr2(x)
return self.dropout(x)
class FFN0(nn.Module):
def __init__(self, state_size = 200, forward_expansion = 1, bn_size = MAX_SEQ - 1, dropout=0.2):
super(FFN0, self ).__init__()
self.state_size = state_size
self.lr1 = nn.Linear(state_size, forward_expansion * state_size)
self.relu = nn.ReLU()
self.lr2 = nn.Linear(forward_expansion * state_size, state_size)
self.layer_normal = nn.LayerNorm(state_size)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = self.lr1(x)
x = self.relu(x)
x = self.lr2(x)
x=self.layer_normal(x)
return self.dropout(x)
def future_mask(seq_length):
future_mask =(np.triu(np.ones([seq_length, seq_length]), k = 1)).astype('bool')
return torch.from_numpy(future_mask)
class TransformerBlock(nn.Module):
def __init__(self, embed_dim, heads = 8, dropout = DROPOUT, forward_expansion = 1):
super(TransformerBlock, self ).__init__()
self.multi_att = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=heads, dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.layer_normal = nn.LayerNorm(embed_dim)
self.ffn = FFN(embed_dim, forward_expansion = forward_expansion, dropout=dropout)
self.ffn0 = FFN0(embed_dim, forward_expansion = forward_expansion, dropout=dropout)
self.layer_normal_2 = nn.LayerNorm(embed_dim)
def forward(self, value, key, query, att_mask):
att_output, att_weight = self.multi_att(value, key, query, attn_mask=att_mask)
att_output = self.dropout(self.layer_normal(att_output + value))
att_output = att_output.permute(1, 0, 2)
x = self.ffn(att_output)
x1 = self.ffn0(att_output)
x = self.dropout(self.layer_normal_2(x + x1 + att_output))
return x.squeeze(-1), att_weight
class Encoder(nn.Module):
def __init__(self, n_skill, max_seq=100, embed_dim=128, dropout = DROPOUT, forward_expansion = 1, num_layers=1, heads = 8):
super(Encoder, self ).__init__()
self.n_skill, self.embed_dim = n_skill, embed_dim
self.embedding = nn.Embedding(2 * n_skill + 1, embed_dim)
self.pos_embedding = nn.Embedding(max_seq - 1, embed_dim)
self.e_embedding = nn.Embedding(n_skill+1, embed_dim)
self.layers = nn.ModuleList([TransformerBlock(embed_dim, forward_expansion = forward_expansion)for _ in range(num_layers)])
self.dropout = nn.Dropout(dropout)
def forward(self, x, question_ids):
device = x.device
x = self.embedding(x)
pos_id = torch.arange(x.size(1)).unsqueeze(0 ).to(device)
pos_x = self.pos_embedding(pos_id)
x = self.dropout(x + pos_x)
x = x.permute(1, 0, 2)
e = self.e_embedding(question_ids)
e = e.permute(1, 0, 2)
for layer in self.layers:
att_mask = future_mask(e.size(0)).to(device)
x, att_weight = layer(e, x, x, att_mask=att_mask)
x = x.permute(1, 0, 2)
x = x.permute(1, 0, 2)
return x, att_weight
class SAKTModel(nn.Module):
def __init__(self, n_skill, max_seq=100, embed_dim=128, dropout = DROPOUT, forward_expansion = 1, enc_layers=1, heads = 8):
super(SAKTModel, self ).__init__()
self.encoder = Encoder(n_skill, max_seq, embed_dim, dropout, forward_expansion, num_layers=enc_layers)
self.pred = nn.Linear(embed_dim, 1)
def forward(self, x, question_ids):
x, att_weight = self.encoder(x, question_ids)
x = self.pred(x)
return x.squeeze(-1), att_weight
class TestDataset(Dataset):
def __init__(self, samples, test_df, n_skill, max_seq=100):
super(TestDataset, self ).__init__()
self.samples, self.user_ids, self.test_df = samples, [x for x in test_df["user_id"].unique() ], test_df
self.n_skill, self.max_seq = n_skill, max_seq
def __len__(self):
return self.test_df.shape[0]
def __getitem__(self, index):
test_info = self.test_df.iloc[index]
user_id = test_info['user_id']
target_id = test_info['content_id']
content_id_seq = np.zeros(self.max_seq, dtype=int)
answered_correctly_seq = np.zeros(self.max_seq, dtype=int)
if user_id in self.samples.index:
content_id, answered_correctly = self.samples[user_id]
seq_len = len(content_id)
if seq_len >= self.max_seq:
content_id_seq = content_id[-self.max_seq:]
answered_correctly_seq = answered_correctly[-self.max_seq:]
else:
content_id_seq[-seq_len:] = content_id
answered_correctly_seq[-seq_len:] = answered_correctly
x = content_id_seq[1:].copy()
x +=(answered_correctly_seq[1:] == 1)* self.n_skill
questions = np.append(content_id_seq[2:], [target_id])
return x, questions
<define_variables>
|
X_train = X_train.values.reshape(-1, 28, 28, 1)/255.
test = test.values.reshape(-1, 28, 28, 1)/255.
y_train = to_categorical(y_train, 10 )
|
Digit Recognizer
|
12,110,917 |
class config:
FOLD = 0
ROOT_PATH = "/kaggle/input/riiid-xgboost-model-and-features"
MODEL_NAME = "xgb_v17_06_f0"
validaten_flg = False
DDOF = 1<load_pretrained>
|
random_seed = 0
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=random_seed )
|
Digit Recognizer
|
12,110,917 |
model_path = f"{config.ROOT_PATH}/{config.MODEL_NAME}/{config.MODEL_NAME}"
model_name = f"{config.MODEL_NAME}_model.bst"
model_meta = f"{config.MODEL_NAME}_assets_model_metadata.json"
model = Predictor.from_path(model_path, model_name=model_name, meta_name=model_meta)
model._extract_model_metadata()
feature_names = model._feature_names
categorical_features = [feature_names[i] for i in model._ohe_categorical_index_vocab.keys() ]
print("features:", len(feature_names))
categorical_features
print(model._mhe_categorical_index_vocab)
assert len(model._mhe_categorical_index_vocab)== 0<load_pretrained>
|
datagen = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1
)
|
Digit Recognizer
|
12,110,917 |
group = joblib.load("/kaggle/input/riiid-sakt-model/group.pkl.zip")
n_skill = joblib.load("/kaggle/input/riiid-sakt-model/skills.pkl.zip")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def create_model() :
return SAKTModel(n_skill, max_seq=MAX_SEQ, embed_dim=EMBED_SIZE, forward_expansion=1, enc_layers=1, heads=4, dropout=0.1)
sakt_model = create_model()
sakt_model.load_state_dict(torch.load("/kaggle/input/riiid-sakt-model/sakt_model.pt"))
sakt_model.to(device)
sakt_model.eval()
sakt_model_b = create_model()
sakt_model_b.load_state_dict(torch.load("/kaggle/input/riiid-sakt-model/best_sakt_model_1.pt"))
sakt_model_b.to(device)
sakt_model_b.eval()
print("all model loaded" )<load_from_csv>
|
model = Sequential()
model.add(Conv2D(32,(5,5), padding='same', input_shape=X_train.shape[1:], activation='relu'))
model.add(Conv2D(32,(5,5), padding='same', activation='relu'))
model.add(MaxPool2D(2,2))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(MaxPool2D(2,2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
|
Digit Recognizer
|
12,110,917 |
content_agg_feats = pd.read_csv(f"{config.ROOT_PATH}/content_agg_feats.csv")
question_tags_ohe = pd.read_csv(f"{config.ROOT_PATH}/question_tags_ohe.csv")
lecture_tags_ohe = pd.read_csv(f"{config.ROOT_PATH}/lecture_tags_ohe.csv")
questions = pd.read_csv("/kaggle/input/riiid-test-answer-prediction/questions.csv")
question_tags_ohe = question_tags_ohe.rename(columns={'question_id':'content_id'})
lecture_tags_ohe = lecture_tags_ohe.rename(columns={'lecture_id':'content_id'})
questions = questions.rename(columns={'question_id':'content_id'} ).drop(["bundle_id", "correct_answer", "tags"], axis=1)
content_agg_feats = content_agg_feats.merge(question_tags_ohe, how="left", on="content_id")
content_agg_feats = content_agg_feats.merge(questions, how="left", on="content_id")
content_agg_feats = content_agg_feats.merge(lecture_tags_ohe, how="outer", on="content_id")
content_agg_feats = content_agg_feats.fillna(0)
content_agg_feats_v = content_agg_feats.values
content_agg_feats_c = content_agg_feats.columns.values[1:]
q_ohe_dic = {i: v for i, v in enumerate(question_tags_ohe.set_index("content_id" ).values)}
l_ohe_dic = {i: row.values for i, row in lecture_tags_ohe.drop("type_of", axis=1 ).set_index("content_id" ).iterrows() }
del lecture_tags_ohe, question_tags_ohe, questions, content_agg_feats
gc.collect()<load_from_csv>
|
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
|
Digit Recognizer
|
12,110,917 |
user_agg_feats_even = pd.read_csv(f"{config.ROOT_PATH}/user_agg_feat_even.csv")
user_agg_feats_odd = pd.read_csv(f"{config.ROOT_PATH}/user_agg_feat_odd.csv")
user_agg_feats_df = pd.concat([user_agg_feats_even, user_agg_feats_odd])
user_agg_feats_v = user_agg_feats_df.values
del user_agg_feats_df, user_agg_feats_even, user_agg_feats_odd
gc.collect()<load_from_csv>
|
EPOCHS = 30
BATCH_SIZE = 20
callback_list = [
ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1),
EarlyStopping(monitor='val_loss', min_delta=0.0005, patience=4)
]
history = model.fit(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
epochs=EPOCHS,
callbacks=callback_list,
validation_data=(X_val, y_val),
steps_per_epoch=X_train.shape[0] // BATCH_SIZE )
|
Digit Recognizer
|
12,110,917 |
user_last_timestamp = pd.read_csv(f"{config.ROOT_PATH}/user_last_timestamp.csv")
last_timestamp_dic = {k: v for k, v in user_last_timestamp.values}
del user_last_timestamp
gc.collect()<load_pretrained>
|
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name='Label')
submission = pd.concat([pd.Series(range(1,28001), name='ImageId'), results], axis=1)
submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,858,028 |
WINDOW = config.ROOT_PATH
with open(f"{WINDOW}/user_all_count.pkl", "rb")as f:
user_all_count = pickle.load(f)
with open(f"{WINDOW}/user_correct_window_200.pkl", "rb")as f:
user_correct_window_200 = pickle.load(f)
with open(f"{WINDOW}/prior_question_elapsed_time_window_dict.pkl", "rb")as f:
prior_question_elapsed_time_window_dict = pickle.load(f)
with open(f"{WINDOW}/prior_question_had_explanation_count.pkl", "rb")as f:
prior_question_had_explanation_count = pickle.load(f)
with open(f"{WINDOW}/prior_question_had_explanation_window_dict.pkl", "rb")as f:
prior_question_had_explanation_window_dict = pickle.load(f)
with open(f"{WINDOW}/timediff_window_dict.pkl", "rb")as f:
timediff_window_dict = pickle.load(f )<define_variables>
|
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
11,858,028 |
col1 = [f"work_q_tag_{i}_v3" for i in range(188)]
col2 = [f"cumsum_q_tag_{i}_v3" for i in range(188)]
col3 = [f"work_l_tag_{i}_v2" for i in range(188)]
user_agg_feats_c = col1 + col2 + col3
rate_col = [f"correct_rate_q_tag_{i}" for i in range(188)]<feature_engineering>
|
X = train.drop('label',axis = 1)
y = train.label
|
Digit Recognizer
|
11,858,028 |
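# Feature lookups by id: content features come from content_agg_feats_v and user
# features from user_agg_feats_v (unseen users fall back to a zero vector).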
def get_content_feature(_content_id):
idx = np.where(content_agg_feats_v[:,0] == _content_id)[0][0]
v = content_agg_feats_v[idx, 1:]
return v.tolist()
def get_user_feature(_user_id):
idx = np.where(user_agg_feats_v[:,0] == _user_id)[0]
if len(idx)== 0:
return np.zeros(user_agg_feats_v.shape[1] - 1)
else:
idx = idx[0]
v = user_agg_feats_v[idx, 1:]
return v.tolist()
def get_timediff(row):
_timestamp = row["timestamp"]
_user_id = row["user_id"]
try:
return _timestamp - last_timestamp_dic[_user_id]
except KeyError:
return 0
def get_lgbm_window_feat(_user_id):
try:
v = prior_question_elapsed_time_window_dict[_user_id]
v = np.array(v)[~np.isnan(v)]
prior_question_elapsed_time_std_w200 = v.std(ddof=config.DDOF)
prior_question_elapsed_time_avg_w200 = v.mean()
prior_question_had_explanation_std_w200 = np.std(prior_question_had_explanation_window_dict[_user_id], ddof=config.DDOF)
prior_question_had_explanation_avg_w200 = np.mean(prior_question_had_explanation_window_dict[_user_id])
timediff_std_w200 = np.std(timediff_window_dict[_user_id], ddof=config.DDOF)
timediff_avg_w200 = np.mean(timediff_window_dict[_user_id])
_prior_question_had_explanation_count = prior_question_had_explanation_count[_user_id]
except KeyError:
prior_question_elapsed_time_std_w200 = 0
prior_question_elapsed_time_avg_w200 = 0
prior_question_had_explanation_std_w200 = 0
prior_question_had_explanation_avg_w200 = 0
timediff_std_w200 = 0
timediff_avg_w200 = 0
_prior_question_had_explanation_count = 0
return [
prior_question_elapsed_time_std_w200,
prior_question_elapsed_time_avg_w200,
prior_question_had_explanation_std_w200,
prior_question_had_explanation_avg_w200,
timediff_std_w200,
timediff_avg_w200,
_prior_question_had_explanation_count,
]<prepare_x_and_y>
|
y = to_categorical(y )
|
Digit Recognizer
|
11,858,028 |
def update_infomation(row):
global user_agg_feats_v
_user_id = row["user_id"]
_timestamp = row["timestamp"]
_content_id = row["content_id"]
_answered_correctly = row["answered_correctly"]
_content_type_id = row["content_type_id"]
try:
_prior_question_had_explanation = int(row["prior_question_had_explanation"])
except TypeError:
_prior_question_had_explanation = 0
try:
_prior_question_elapsed_time = float(row["prior_question_elapsed_time"])
except TypeError:
_prior_question_elapsed_time = 0
try:
_timediff = _timestamp - last_timestamp_dic[_user_id]
except KeyError:
_timediff = 0
last_timestamp_dic[_user_id] = _timestamp
if _content_type_id == 0:
n_work = q_ohe_dic[_content_id]
n_correct = n_work * _answered_correctly
n_lecture = np.zeros(188)
else:
n_work = np.zeros(188)
n_correct = np.zeros(188)
n_lecture = l_ohe_dic[_content_id]
tag_feats = np.hstack([n_work, n_correct, n_lecture])
idx = np.where(user_agg_feats_v[:,0] == _user_id)[0]
if len(idx)== 0:
append_v = np.hstack([_user_id, tag_feats] ).astype(int)
user_agg_feats_v = np.vstack([user_agg_feats_v, append_v])
else:
idx = idx[0]
user_agg_feats_v[idx, 1:] += tag_feats.astype(int)
try:
prior_question_had_explanation_count[_user_id] += _prior_question_had_explanation
user_all_count[_user_id][0] += 1
user_all_count[_user_id][1] += int(_answered_correctly == 1)
except KeyError:
prior_question_had_explanation_count[_user_id] = _prior_question_had_explanation
user_all_count[_user_id] = [1, int(_answered_correctly == 1)]
try:
if len(user_correct_window_200[_user_id])== 201:
user_correct_window_200[_user_id].pop(0)
prior_question_elapsed_time_window_dict[_user_id].pop(0)
prior_question_had_explanation_window_dict[_user_id].pop(0)
timediff_window_dict[_user_id].pop(0)
user_correct_window_200[_user_id].append(int(_answered_correctly == 1))
prior_question_elapsed_time_window_dict[_user_id].append(_prior_question_elapsed_time)
prior_question_had_explanation_window_dict[_user_id].append(_prior_question_had_explanation)
timediff_window_dict[_user_id].append(_timediff)
except KeyError:
user_correct_window_200[_user_id] = [int(_answered_correctly == 1)]
prior_question_elapsed_time_window_dict[_user_id] = [_prior_question_elapsed_time]
prior_question_had_explanation_window_dict[_user_id] = [_prior_question_had_explanation]
timediff_window_dict[_user_id] = [_timediff]<load_pretrained>
|
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.25,random_state = 123 )
|
Digit Recognizer
|
11,858,028 |
if config.validaten_flg:
target_df = pd.read_pickle('../input/riiid-cross-validation-files/cv1_valid.pickle')
iter_test = Iter_Valid(target_df, max_user=1000)
predicted = []
def set_predict(df):
predicted.append(df)
user_agg_feats_v = user_agg_feats_v[:10000]
last_timestamp_dic = {k: last_timestamp_dic[k] for k in user_agg_feats_v[:, 0]}
user_correct_window_200 = {k: user_correct_window_200[k] for k in user_agg_feats_v[:, 0]}
user_all_count = {k: user_all_count[k] for k in user_agg_feats_v[:, 0]}
prior_question_had_explanation_count = {k: prior_question_had_explanation_count[k] for k in user_agg_feats_v[:, 0]}
user_correct_window_200 = {k: user_correct_window_200[k] for k in user_agg_feats_v[:, 0]}
prior_question_elapsed_time_window_dict = {k: prior_question_elapsed_time_window_dict[k] for k in user_agg_feats_v[:, 0]}
prior_question_had_explanation_window_dict = {k: prior_question_had_explanation_window_dict[k] for k in user_agg_feats_v[:, 0]}
timediff_window_dict = {k: timediff_window_dict[k] for k in user_agg_feats_v[:, 0]}
else:
env = riiideducation.make_env()
iter_test = env.iter_test()
set_predict = env.predict<set_options>
|
model = Sequential()
model.add(Conv2D(32,kernel_size =(3,3),activation = 'relu',input_shape =(28,28,1)))
model.add(Conv2D(64,kernel_size =(3,3),activation = 'relu'))
model.add(Flatten())
model.add(Dense(10,activation = 'softmax'))
|
Digit Recognizer
|
11,858,028 |
print(psutil.virtual_memory().percent )<install_modules>
|
model.compile(optimizer = 'adam',loss = 'categorical_crossentropy',metrics = ['accuracy'] )
|
Digit Recognizer
|
11,858,028 |
!pip install ../input/lgbm-inference-db-full-data/pickle5-0.0.11/<import_modules>
|
history = model.fit(X_train,y_train,validation_data =(X_test,y_test),epochs = 20 )
|
Digit Recognizer
|
11,858,028 |
import pandas as pd
import numpy as np
import gc
from sklearn.metrics import roc_auc_score
from collections import defaultdict
from tqdm.notebook import tqdm
import lightgbm as lgb
import pickle5 as pickle
from numba import jit<categorify>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
11,858,028 |
@contextmanager
def timer(name):
    t0 = time.time()
    yield
    print('\n[{}] done in {} Minutes\n'.format(name, round((time.time() - t0) / 60, 2)))<define_variables>
|
model.compile(optimizer = 'Adam' , loss = "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
11,858,028 |
train_pickle = '../input/lgbm-inference-db-full-data/train_df.pickle'
question_file = '../input/lgbm-inference-db-full-data/question_features.csv'
ms_in_a_day = 8.64 * 10 ** 7
prior_question_elapsed_time_mean = 25439.41<compute_train_metric>
|
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
11,858,028 |
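# Elo-style knowledge tracing: theta (user ability) is nudged toward or away from
# beta (item difficulty) with a learning rate that decays with the number of prior
# answers; the update functions are numba-jitted for speed.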
left_asymptote = 0.25
@jit(nopython=True)
def get_new_theta(is_good_answer, beta, left_asymptote, theta, nb_previous_answers):
return theta + learning_rate_theta(nb_previous_answers)*(
is_good_answer - probability_of_good_answer(theta, beta, left_asymptote))
@jit(nopython=True)
def learning_rate_theta(nb_answers):
return max(0.3 /(1 + 0.01 * nb_answers), 0.04)
@jit(nopython=True)
def probability_of_good_answer(theta, beta, left_asymptote):
return left_asymptote +(1 - left_asymptote)* sigmoid(theta - beta)
@jit(nopython=True)
def sigmoid(x):
return 1 /(1 + np.exp(-x))<categorify>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
batch_size = 256
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size),
epochs = 30, validation_data =(X_test,y_test),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction] )
|
Digit Recognizer
|
11,858,028 |
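# For each row of the incoming test batch, derive user-level features (attempt counts,
# lag/wait times, Elo theta/beta, per-part accuracy and spent time) from the running
# state dictionaries, then attach them to the dataframe.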
def calc_user_feats_test(df, bundle_count, temp_values):
attempt_no_array = np.zeros(len(df), dtype=np.int32)
last_lecture_time_array = np.zeros(len(df), dtype=np.float64)
last_incorrect_time_array = np.zeros(len(df), dtype=np.float64)
acsu = np.zeros(len(df), dtype=np.int32)
acsu_part = np.zeros(len(df), dtype=np.int32)
cu = np.zeros(len(df), dtype=np.int32)
cu_part = np.zeros(len(df), dtype=np.int32)
tu_part = np.zeros(len(df), dtype=np.int32)
lag_time_array = np.zeros(len(df), dtype=np.int64)
wait_time_array = np.zeros(len(df), dtype=np.float64)
theta_array = np.zeros(len(df), dtype=np.float32)
beta_array = np.zeros(len(df), dtype=np.float32)
difficulty_correct_array = np.zeros(len(df), dtype=np.float64)
difficulty_incorrect_array = np.zeros(len(df), dtype=np.float64)
feature_cols = ['user_id', 'prior_question_elapsed_time', 'timestamp',
'content_id', 'content_type_id', 'part', 'bundle_id',
'mean_content_accuracy_sm']
for cnt, row in enumerate(df[feature_cols].values):
if row[2] == 0:
lag_time_array[cnt] = 0
prior_question_lag_time[row[0]] = np.nan
wait_time_array[cnt] = np.nan
elif row[2] == user_last_timestamp[row[0]]:
wait_time_array[cnt] = temp_values[5]
lag_time_array[cnt] = row[2] - user_last_timestamp_traceback[row[0]]
else:
lag_time_array[cnt] = row[2] - user_last_timestamp[row[0]]
if(lag_time_array[cnt] == 0)|(lag_time_array[cnt] == row[2])|(len(prior_bundle_count[row[0]])!= 2):
wait_time_array[cnt] = np.nan
else:
wait_time_array[cnt] = prior_question_lag_time[row[0]] - prior_bundle_count[row[0]][1] * row[1]
user_last_timestamp_traceback[row[0]] = user_last_timestamp[
row[0]]
user_last_timestamp[row[0]] = row[2]
if row[4] == 1:
last_lecture_time[row[0]] = row[3]
else:
if row[6] in bundles:
if len(prior_bundle_count[row[0]])== 2:
if row[6] == prior_bundle_count[row[0]][0]:
bundle_count += 1
bundle_flg = True
save_temp_value_flg = False
else:
bundle_count = 1
bundle_flg = False
save_temp_value_flg = True
else:
bundle_count = 1
bundle_flg = False
save_temp_value_flg = True
else:
bundle_count = 1
bundle_flg = False
save_temp_value_flg = False
prior_question_lag_time[row[0]] = lag_time_array[cnt]
if save_temp_value_flg:
temp_values[0] = answered_correctly_sum_user_dict['total'][row[0]]
temp_values[1] = answered_correctly_sum_user_dict[int(row[5])][row[0]]
temp_values[2] = question_count_dict['total'][row[0]]
temp_values[3] = question_count_dict[int(row[5])][row[0]]
temp_values[4] = last_incorrect_time[row[0]]
temp_values[5] = wait_time_array[cnt]
temp_values[6] = beta_dict[row[3]]
temp_values[7] = theta_dict[row[0]]
temp_values[8] = difficulty_dict[row[0]]['correct']
temp_values[9] = difficulty_dict[row[0]]['incorrect']
if bundle_flg:
acsu[cnt] = temp_values[0]
cu[cnt] = temp_values[2]
difficulty_correct_array[cnt] = temp_values[8]
difficulty_incorrect_array[cnt] = temp_values[9]
acsu_part[cnt] = temp_values[1]
cu_part[cnt] = temp_values[3]
theta_array[cnt] = temp_values[7]
beta_array[cnt] = temp_values[6]
if row[2] == 0:
last_incorrect_time_array[cnt] = np.nan
else:
last_incorrect_time_array[cnt] = row[2] - temp_values[4]
else:
acsu[cnt] = answered_correctly_sum_user_dict['total'][row[0]]
cu[cnt] = question_count_dict['total'][row[0]]
difficulty_correct_array[cnt] = difficulty_dict[row[0]]['correct']
difficulty_incorrect_array[cnt] = difficulty_dict[row[0]]['incorrect']
acsu_part[cnt] = answered_correctly_sum_user_dict[int(row[5])][row[0]]
cu_part[cnt] = question_count_dict[int(row[5])][row[0]]
if row[2] == 0:
last_incorrect_time_array[cnt] = np.nan
else:
last_incorrect_time_array[cnt] = row[2] - last_incorrect_time[row[0]]
beta_array[cnt] = beta_dict[row[3]]
theta_array[cnt] = theta_dict[row[0]]
tu_part[cnt] = user_time_dict[int(row[5])][row[0]]
if bundle_count == 1:
attempt_dict[row[0]][row[6]] += 1
attempt_no_array[cnt] = attempt_dict[row[0]][row[6]]
if last_lecture_time[row[0]] == 0:
last_lecture_time_array[cnt] = np.nan
else:
last_lecture_time_array[cnt] = row[2] - last_lecture_time[row[0]]
if np.isnan(row[1]):
user_time_dict[int(row[5])][row[0]] += 0
else:
user_time_dict[int(row[5])][row[0]] += row[1]
prior_bundle_count[row[0]] =(row[6], bundle_count)
df['attempt_no'] = attempt_no_array
df['last_lecture_time'] = last_lecture_time_array
df['last_incorrect_time'] = last_incorrect_time_array
df['lag_time'] = lag_time_array
df['prior_question_wait_time'] = wait_time_array
df['theta'] = theta_array
df['beta'] = beta_array
user_feats_df = pd.DataFrame({'answered_correctly_sum_user': acsu, 'answered_count': cu,
'answered_correctly_sum_user_part': acsu_part, 'answered_count_part': cu_part,
'total_time_spent_user_part': tu_part,
'difficulty_correct_count': difficulty_correct_array,
'difficulty_incorrect_count': difficulty_incorrect_array
})
user_feats_df['mean_user_accuracy'] = user_feats_df['answered_correctly_sum_user'] / user_feats_df['answered_count']
user_feats_df['mean_user_accuracy_part'] = user_feats_df['answered_correctly_sum_user_part'] / user_feats_df[
'answered_count_part']
user_feats_df['mean_user_spent_time_part'] = user_feats_df['total_time_spent_user_part'] / user_feats_df[
'answered_count_part']
user_feats_df.loc[user_feats_df['answered_count_part'] == 0, 'mean_user_spent_time_part'] = np.nan
user_feats_df['difficulty_correct'] = user_feats_df['difficulty_correct_count'] / user_feats_df['answered_correctly_sum_user']
user_feats_df['difficulty_incorrect'] = user_feats_df['difficulty_incorrect_count'] / \
(user_feats_df['answered_count'] - user_feats_df['answered_correctly_sum_user'])
user_feats_df['difficulty_diff'] = user_feats_df['difficulty_correct'] - user_feats_df['difficulty_incorrect']
user_feats_df.drop(columns=['difficulty_correct_count', 'difficulty_incorrect_count', 'difficulty_correct'], inplace=True)
feats_cols = user_feats_df.columns
for col in feats_cols:
df[col] = user_feats_df[col].values
df['hmean_user_content_accuracy'] = 2 *(df['mean_user_accuracy'] * df['mean_content_accuracy_sm'])/ \
(df['mean_user_accuracy'] + df['mean_content_accuracy_sm'])
df.loc[df['prior_question_wait_time'] < 0, 'prior_question_wait_time'] = 0
return df, bundle_count, temp_values
<define_variables>
|
predictions = model.predict_classes(test, verbose=1 )
|
Digit Recognizer
|
11,858,028 |
def update_user_feats(df):
bundle_count = 1
for row in df[['user_id', 'answered_correctly', 'content_type_id', 'timestamp',
'part', 'content_id', 'answered_count', 'mean_content_accuracy_sm']].values:
if row[2] == 0:
answered_correctly_sum_user_dict['total'][row[0]] += row[1]
answered_correctly_sum_user_dict[int(row[4])][row[0]] += row[1]
question_count_dict['total'][row[0]] += 1
question_count_dict[int(row[4])][row[0]] += 1
theta = theta_dict[row[0]]
beta = beta_dict[row[5]]
theta_dict[row[0]] = get_new_theta(row[1], beta, left_asymptote, theta, row[6])
if row[1] == 0:
last_incorrect_time[row[0]] = row[3]
difficulty_dict[row[0]]['incorrect'] += row[7]
else:
difficulty_dict[row[0]]['correct'] += row[7]<load_pretrained>
|
prediction = pd.DataFrame({"ImageId":list(range(1,len(predictions)+1)) ,"Label":predictions} )
|
Digit Recognizer
|
11,858,028 |
with open(train_pickle, 'rb')as file:
df = pickle.load(file )<categorify>
|
prediction.to_csv('kaggle_submission.csv',index=False,header=True)
prediction
|
Digit Recognizer
|
11,858,028 |
def multi_level_dict() :
return defaultdict(int)
attempt_dict = defaultdict(multi_level_dict )<categorify>
|
model = Sequential()
model.add(Conv2D(64,(3,3),activation='relu',input_shape=(28,28,1)))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(256,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(128,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(64,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(10,activation='softmax'))
|
Digit Recognizer
|
11,858,028 |
def multi_level_float_dict() :
return defaultdict(float )<data_type_conversions>
|
model.compile(RMSprop(lr=0.001,rho=0.9),loss='categorical_crossentropy',metrics=['accuracy'] )
|
Digit Recognizer
|
11,858,028 |
with timer("counting"):
keys = np.sort(df['user_id'].unique())
total = len(keys)
user_bundle = df.groupby('user_id')['bundle_id'].apply(np.array ).apply(np.sort ).apply(np.unique)
user_attempts = df.groupby(['user_id', 'bundle_id'])['bundle_id'].count().astype(np.uint8 ).groupby('user_id' ).apply(np.array)
for user_id, bundle, attempt in tqdm(zip(keys, user_bundle, user_attempts), total=total):
attempt_dict[user_id] = defaultdict(int, zip(bundle, attempt))
del user_bundle, user_attempts, df, bundle, attempt
gc.collect()<load_from_csv>
|
train_datagen = ImageDataGenerator(rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=False,
fill_mode='nearest')
train_datagen.fit(X_train)
train_generator = train_datagen.flow(X_train,y_train,batch_size=128)
|
Digit Recognizer
|
11,858,028 |
<load_from_csv><EOS>
|
earlystop = EarlyStopping(monitor='val_loss',patience=2,verbose=1)
learning_reduce = ReduceLROnPlateau(patience=2,monitor="val_acc",verbose=1,min_lr=0.00001,factor=0.5)
callbacks = [learning_reduce]
history = model.fit_generator(train_generator,epochs=30,verbose=1,validation_data=(X_test,y_test),callbacks=callbacks )
|
Digit Recognizer
|
11,949,480 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<define_variables>
|
BATCH_SIZE = 64
VALID_BATCH_SIZE = 100
TEST_BATCH_SIZE = 100
EPOCHS = 5
NUM_CLASSES = 10
SEED = 42
EARLY_STOPPING = 25
OUTPUT_DIR = '/kaggle/working/'
MODEL_NAME = 'efficientnet-b0'
|
Digit Recognizer
|
11,949,480 |
TARGET = 'answered_correctly'
FEATS_1 = ['mean_user_accuracy',
'answered_count',
'mean_content_accuracy_sm',
'prior_question_elapsed_time',
'last_incorrect_time', 'prior_question_wait_time',
'content_freq_encoding',
'lag_time',
'attempt_no', 'last_lecture_time',
'mean_user_spent_time_part',
'answered_correctly_sum_user_part',
'mean_user_accuracy_part',
'part', 'theta', 'beta',
'question_avg_explanation_sm',
'question_avg_elapsed_time_sm',
'tags_lsi',
'difficulty_incorrect', 'difficulty_diff'
]
categorical_features = ['part', 'tags_lsi']<define_variables>
|
!pip install efficientnet-pytorch
|
Digit Recognizer
|
11,949,480 |
FEATS_2 = ['mean_user_accuracy',
'answered_correctly_sum_user',
'answered_count',
'mean_content_accuracy_sm',
'prior_question_elapsed_time',
'hmean_user_content_accuracy',
'last_incorrect_time', 'prior_question_wait_time',
'content_freq_encoding',
'lag_time',
'attempt_no', 'last_lecture_time',
'mean_user_spent_time_part',
'answered_correctly_sum_user_part',
'mean_user_accuracy_part',
'part', 'theta', 'beta',
'question_avg_explanation_sm',
'question_avg_elapsed_time_sm',
'tags_lsi',
'difficulty_incorrect',
]<define_variables>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from sklearn.metrics import accuracy_score
from PIL import Image, ImageOps, ImageEnhance
from efficientnet_pytorch import EfficientNet
|
Digit Recognizer
|
11,949,480 |
model_1 = lgb.Booster(model_file='../input/lgbm-inference-db-full-data/lightgbm_v11.5.txt')<define_variables>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print('Shape of the training data: ', train.shape)
print('Shape of the test data: ', test.shape )
|
Digit Recognizer
|
11,949,480 |
model_2 = lgb.Booster(model_file='../input/lgbm-inference-db-full-data/lightgbm_v11.6.txt')<predict_on_test>
|
train_df, valid_df = train_test_split(train, test_size = 0.2, random_state=SEED,stratify=train['label'] )
|
Digit Recognizer
|
11,949,480 |
env = riiideducation.make_env()
iter_test = env.iter_test()
set_predict = env.predict<concatenate>
|
n_pixels = len(train_df.columns)- 1
class MNIST_Dataset(Dataset):
def __init__(self, df
):
if len(df.columns)== n_pixels:
self.X = df.values.reshape(( -1,28,28)).astype(np.uint8)[:,:,:,None]
self.y = None
self.X3 = np.full(( self.X.shape[0], 3, 28, 28), 0.0)
for i, s in enumerate(self.X):
self.X3[i] = np.moveaxis(cv2.cvtColor(s, cv2.COLOR_GRAY2RGB), -1, 0)
else:
self.X = df.iloc[:,1:].values.reshape(( -1,28,28)).astype(np.uint8)[:,:,:,None]
self.y = torch.from_numpy(df.iloc[:,0].values)
self.X3 = np.full(( self.X.shape[0], 3, 28, 28), 0.0)
for i, s in enumerate(self.X):
self.X3[i] = np.moveaxis(cv2.cvtColor(s, cv2.COLOR_GRAY2RGB), -1, 0)
def __len__(self):
return len(self.X3)
def __getitem__(self, idx):
if self.y is not None:
return self.X3[idx] , self.y[idx]
else:
return self.X3[idx]
|
Digit Recognizer
|
11,949,480 |
previous_test_df = None
for(test_df, sample_prediction_df)in iter_test:
test_df = pd.concat([test_df.reset_index(drop=True),
questions_df.reindex(test_df['content_id'].values ).reset_index(drop=True)], axis=1)
test_df = pd.concat([test_df.reset_index(drop=True),
part_df.reindex(test_df['part'].values ).reset_index(drop=True)], axis=1)
if previous_test_df is not None:
previous_test_df[TARGET] = eval(test_df["prior_group_answers_correct"].iloc[0])
update_user_feats(previous_test_df)
else:
bundle_count = 1
temp_values = np.empty(10)* np.nan
test_df, bundle_count, temp_values = calc_user_feats_test(test_df, bundle_count, temp_values)
previous_test_df = test_df.copy()
test_df = test_df[test_df['content_type_id'] == 0].reset_index(drop=True)
test_df['prior_question_had_explanation'] = test_df.prior_question_had_explanation.fillna(False ).astype('int8')
test_df['part'] = test_df.part.fillna(False ).astype('int8')
test_df['prior_question_elapsed_time'] = test_df.prior_question_elapsed_time.fillna(prior_question_elapsed_time_mean)
for col in categorical_features:
test_df[col] = test_df[col].astype('category')
test_df[TARGET] = model_1.predict(test_df[FEATS_1])* 0.5 + model_2.predict(test_df[FEATS_2])* 0.5
set_predict(test_df[['row_id', TARGET]] )<split>
|
train_dataset = MNIST_Dataset(train_df)
valid_dataset = MNIST_Dataset(valid_df)
test_dataset = MNIST_Dataset(test)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE,
shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
batch_size=VALID_BATCH_SIZE, shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=TEST_BATCH_SIZE, shuffle=False )
|
Digit Recognizer
|
11,949,480 |
env = riiideducation.make_env()
iter_test = env.iter_test()<import_modules>
|
def get_model(model_name='efficientnet-b0'):
model = EfficientNet.from_pretrained(model_name)
del model._fc
model._fc = nn.Linear(1280, NUM_CLASSES)
return model
|
Digit Recognizer
|
11,949,480 |
import sys
import numpy as np<set_options>
|
def set_seed(seed: int = 42):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed )
|
Digit Recognizer
|
11,949,480 |
warnings.filterwarnings("ignore")
<set_options>
|
set_seed(SEED)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
output_dir = OUTPUT_DIR
model = get_model(MODEL_NAME)
model = model.to(device)
optimizer = optim.Adam(model.parameters() , lr=0.001)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
loss_func = nn.CrossEntropyLoss()
if torch.cuda.is_available() :
model = model.cuda()
loss_func = loss_func.cuda()
best_val_accuracy = 0
min_val_loss = np.inf
best_epoch = 0
batches = 0
epochs_no_improve = 0
n_epochs_stop = EARLY_STOPPING
for epoch in range(EPOCHS):
running_loss = 0.0
targets = torch.empty(size=(BATCH_SIZE,)).to(device)
outputs = torch.empty(size=(BATCH_SIZE,)).to(device)
model.train()
for batch_idx,(data, target)in enumerate(train_loader):
batches += 1
data, target = Variable(data), Variable(target)
if torch.cuda.is_available() :
data = data.type(torch.FloatTensor ).cuda()
target = target.cuda()
targets = torch.cat(( targets, target), 0)
optimizer.zero_grad()
output = model(data)
loss = loss_func(output, target)
output = torch.argmax(torch.softmax(output, dim=1), dim=1)
outputs = torch.cat(( outputs, output), 0)
running_loss += loss.item()
loss.backward()
optimizer.step()
scheduler.step()
print('train/loss on EPOCH {}: {}'.format(epoch, running_loss/batches))
train_acc = accuracy_score(targets.cpu().detach().numpy().astype(int),
outputs.cpu().detach().numpy().astype(int))
print('train/accuracy: {} for epoch {}'.format(train_acc, epoch))
model.eval()
running_loss = 0.0
batches = 0
targets = torch.empty(size=(BATCH_SIZE,)).to(device)
outputs = torch.empty(size=(BATCH_SIZE,)).to(device)
for batch_idx,(data, target)in enumerate(valid_loader):
batches += 1
data, target = Variable(data, volatile=True), Variable(target)
if torch.cuda.is_available() :
data = data.type(torch.FloatTensor ).cuda()
target = target.cuda()
with torch.no_grad() :
targets = torch.cat(( targets, target), 0)
output = model(data)
loss = loss_func(output, target)
output = torch.argmax(torch.softmax(output, dim=1), dim=1)
outputs = torch.cat(( outputs, output), 0)
running_loss += loss.item()
val_loss = running_loss/batches
print('val/loss: {}'.format(val_loss))
val_acc = accuracy_score(targets.cpu().detach().numpy().astype(int),
outputs.cpu().detach().numpy().astype(int))
print('val/accuracy: {} for epoch {}'.format(val_acc, epoch))
if val_acc > best_val_accuracy:
best_val_accuracy = val_acc
min_val_loss = val_loss
print('Best val/acc: {} for epoch {}, saving model---->'.format(val_acc, epoch))
torch.save(model.state_dict() , "{}/snapshot_epoch_{}.pth".format(output_dir, epoch))
best_epoch = epoch
epochs_no_improve = 0
else:
epochs_no_improve += 1
if epochs_no_improve == n_epochs_stop:
print('Early stopping!')
break
|
Digit Recognizer
|
11,949,480 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
q_pad = 13523
a_pad = 3
start_token = 2<load_pretrained>
|
def prediction(model, data_loader):
model.eval()
test_pred = torch.LongTensor()
for i, data in enumerate(data_loader):
data = Variable(data, volatile=True)
if torch.cuda.is_available() :
data = data.type(torch.FloatTensor ).cuda()
output = model(data)
pred = output.cpu().data.max(1, keepdim=True)[1]
test_pred = torch.cat(( test_pred, pred), dim=0)
return test_pred
|
Digit Recognizer
|
11,949,480 |
group = pd.read_pickle(".. /input/groups/group.pandas" )<categorify>
|
model.load_state_dict(torch.load("snapshot_epoch_{}.pth".format(best_epoch)))
test_pred = prediction(model, test_loader)
submission = pd.DataFrame(np.c_[np.arange(1, len(test_dataset)+1)[:,None], test_pred.numpy() ],
columns=['ImageId', 'Label'])
|
Digit Recognizer
|
11,949,480 |
features_1_path = '../input/get-features-1/'
que_data = pd.read_pickle(features_1_path + "que_data.pickle")
difficulty =(np.round(que_data.que_correct_per, 1)*10 ).astype("int8" ).values
difficulty = torch.Tensor(difficulty ).long().to(device)
unique_tags = pd.concat([que_data.tags1,que_data.tags2, que_data.tags3, que_data.tags4,que_data.tags5,que_data.tags6] ).unique()
tags_n = len(unique_tags)
unk_tag = tags_n-1
que_data = que_data.replace(-1, unk_tag)
part_valus = torch.from_numpy(que_data.part.values ).long().to(device)
que_data = que_data.to_dict("index")
que_arr = np.zeros(( np.array(list(que_data.keys())).shape[0], 6))
for i in que_data:
a = que_data[i]
que_arr[i] = [a['tags1'],a['tags2'],a['tags3'],a['tags4'],a['tags5'],a['tags6']]
with open('../input/hashtable/user_info', 'rb') as handle:
user_info = pickle.load(handle )<drop_column>
|
submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,949,480 |
st_user_info = {}
for i in user_info:
st_user_info[i] = {"timestamp_ms":user_info[i]["first_timestamp"]}
del user_info<categorify>
|
submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,953,506 |
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self ).__init__()
self.dropout = nn.Dropout(p=dropout)
self.scale = nn.Parameter(torch.ones(1))
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float ).unsqueeze(1)
div_term = torch.exp(torch.arange(
0, d_model, 2 ).float() *(-math.log(10000.0)/ d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0 ).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.scale * self.pe[:x.size(0), :]
return self.dropout(x )<categorify>
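# Minimal shape check (illustrative sketch, not from the original notebook):
# the module expects a(seq_len, batch, d_model)tensor and adds the scaled
# sinusoidal positional term before dropout.
import torch
pos_enc = PositionalEncoding(d_model=128, max_len=200)
dummy = torch.zeros(60, 4, 128)
print(pos_enc(dummy).shape)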
|
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping
|
Digit Recognizer
|
11,953,506 |
class EmbedTag(nn.Module):
def __init__(self, d_model, que_arr, tags_n):
super(EmbedTag, self ).__init__()
self.que_arr = torch.LongTensor(que_arr ).to(device)
self.embedding = nn.Embedding(tags_n, d_model)
def forward(self, x):
x = self.que_arr[x, :]
x = self.embedding(x)
return torch.sum(x, dim=-2 )<categorify>
|
train_data = pd.read_csv('.. /input/digit-recognizer/train.csv')
test_data = pd.read_csv('.. /input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
11,953,506 |
class TransformerModel(nn.Module):
def __init__(self, intoken, outtoken, hidden, que_arr, part_arr, difficulty, enc_layers=4, dec_layers=4, dropout=0.1, ts_unique=70, prior_unique=50):
super(TransformerModel, self ).__init__()
nhead = hidden//64
self.encoder = nn.Embedding(intoken, hidden)
self.pos_encoder = PositionalEncoding(hidden, dropout)
self.decoder = nn.Embedding(outtoken, hidden)
self.pos_decoder = PositionalEncoding(hidden, dropout)
self.tagsEmbedder = EmbedTag(hidden, que_arr, tags_n)
self.transformer = nn.Transformer(d_model=hidden, nhead=nhead, num_encoder_layers=enc_layers, num_decoder_layers=dec_layers, dim_feedforward=hidden*4, dropout=dropout, activation='relu')
self.fc_out = nn.Linear(hidden, 1)
self.src_mask = None
self.trg_mask = None
self.memory_mask = None
self.part_embedding = nn.Embedding(7,hidden)
self.part_arr = part_arr
self.ts_embedding = nn.Embedding(ts_unique, hidden)
self.prior_embedding = nn.Embedding(prior_unique, hidden)
self.task_container_embedding = nn.Embedding(10000, hidden)
self.user_answer_embedding = nn.Embedding(5, hidden)
self.difficulty = difficulty
self.difficulty_embedding = nn.Embedding(11, hidden)
self.dropout_7 = nn.Dropout(dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.dropout_4 = nn.Dropout(dropout)
self.dropout_5 = nn.Dropout(dropout)
self.dropout_6 = nn.Dropout(dropout)
self.explan_embedding = nn.Embedding(3, hidden)
self.dropout_9 = nn.Dropout(dropout)
def generate_square_subsequent_mask(self, sz, sz1=None):
if sz1 == None:
mask = torch.triu(torch.ones(sz, sz), 1)
else:
mask = torch.triu(torch.ones(sz, sz1), 1)
return mask.masked_fill(mask==1, float('-inf'))
def make_len_mask_a(self, inp):
return(inp == a_pad ).transpose(0, 1)
def make_len_mask_q(self, inp):
return(inp == q_pad ).transpose(0, 1)
def forward(self, src, trg, ts, prior, task_container, user_answer, explan):
if self.trg_mask is None or self.trg_mask.size(0)!= len(trg):
self.trg_mask = self.generate_square_subsequent_mask(len(trg)).to(trg.device)
if self.src_mask is None or self.src_mask.size(0)!= len(src):
self.src_mask = self.generate_square_subsequent_mask(len(src)).to(trg.device)
if self.memory_mask is None or self.memory_mask.size(0)!= len(trg)or self.memory_mask.size(1)!= len(src):
self.memory_mask = self.generate_square_subsequent_mask(len(trg),len(src)).to(trg.device)
src_pad_mask = self.make_len_mask_q(src)
trg_pad_mask = self.make_len_mask_a(trg)
part_emb = self.dropout_1(self.part_embedding(self.part_arr[src]-1))
ts_emb = self.dropout_3(self.ts_embedding(ts))
user_answer_emb = self.dropout_5(self.user_answer_embedding(user_answer))
src = self.encoder(src)
src = torch.add(src, part_emb)
src = torch.add(src, ts_emb)
src = self.pos_encoder(src)
trg = self.decoder(trg)
trg = torch.add(trg, user_answer_emb)
trg = self.pos_decoder(trg)
output = self.transformer(src, trg, src_mask=self.src_mask, tgt_mask=self.trg_mask, memory_mask=self.memory_mask,
src_key_padding_mask=src_pad_mask, tgt_key_padding_mask=trg_pad_mask, memory_key_padding_mask=src_pad_mask)
output = self.fc_out(output)
return output<load_pretrained>
|
X_train = train_data.drop(labels = ["label"],axis = 1)
Y_train = train_data["label"]
Y_train = to_categorical(Y_train, num_classes = 10 )
|
Digit Recognizer
|
11,953,506 |
d_model = 128
INPUT_DIM = q_pad+1
OUTPUT_DIM = 4
model_saint = TransformerModel(INPUT_DIM, OUTPUT_DIM, hidden=d_model, que_arr=que_arr,part_arr=part_valus, difficulty=difficulty ).to(device)
weights = torch.load(".. /input/last-saint/last.torch", map_location=torch.device(device))
model_saint.load_state_dict(weights)
model_saint.to(device)
model_saint.eval()<define_variables>
|
X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size = 0.2, random_state = 42 )
|
Digit Recognizer
|
11,953,506 |
def pred_users(vals):
eval_batch = vals.shape[0]
tensor_question = np.zeros(( eval_batch, 100), dtype=np.long)
tensor_answers = np.zeros(( eval_batch, 100), dtype=np.long)
tensor_ts = np.zeros(( eval_batch, 100), dtype=np.long)
tensor_user_answer = np.zeros(( eval_batch, 100), dtype=np.long)
val_len = []
preds = []
group_index = group.index
for i, line in enumerate(vals):
if line[2] == True:
val_len.append(0)
continue
user_id = line[0]
question_id = line[1]
timestamp = get_timestamp(line[4], user_id)
prior = get_prior(line[5])
task_container_id = line[3]
que_history = np.array([], dtype=np.int32)
answers_history = np.array([], dtype=np.int32)
ts_history = np.array([], dtype=np.int32)
user_answer_history = np.array([], dtype=np.int32)
if user_id in group_index:
cap = 99
que_history, answers_history, ts_history, user_answer_history = group[user_id]
que_history = que_history[-cap:]
answers_history = answers_history[-cap:]
ts_history = ts_history[-cap:]
user_answer_history = user_answer_history[-cap:]
a_token = 2
user_a_token = 4
answers_history = np.concatenate(( [a_token],answers_history))
user_answer_history = np.concatenate(( [user_a_token],user_answer_history))
que_history = np.concatenate(( que_history, [question_id]))
ts_history = np.concatenate(( ts_history, [timestamp]))
tensor_question[i][:len(que_history)] = que_history
tensor_answers[i][:len(que_history)] = answers_history
tensor_ts[i][:len(que_history)] = ts_history
tensor_user_answer[i][:len(que_history)] = user_answer_history
val_len.append(len(que_history))
tensor_question = torch.from_numpy(tensor_question ).long().T.to(device)
tensor_answers = torch.from_numpy(tensor_answers ).long().T.to(device)
tensor_ts = torch.from_numpy(tensor_ts ).long().T.to(device)
tensor_user_answer = torch.from_numpy(tensor_user_answer ).long().T.to(device)
with torch.no_grad() :
out = F.sigmoid(model_saint(tensor_question, tensor_answers, tensor_ts, None, None, tensor_user_answer,2)).squeeze(dim=-1 ).T
for j in range(len(val_len)) :
preds.append(out[j][val_len[j]-1].item())
return preds<string_transform>
|
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2 )
|
Digit Recognizer
|
11,953,506 |
def split_preds(preds):
if preds.shape[0] > 1000:
ret = []
for i in np.array_split(preds, preds.shape[0]//1000):
ret.extend(pred_users(i))
return ret
else:
return pred_users(preds )<prepare_x_and_y>
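# Illustrative check of the chunking above (not from the original notebook):
# np.array_split(preds, n // 1000) scores large test batches in roughly
# 1000-row pieces, e.g. a 2500-row batch is split into two chunks.
import numpy as np
dummy_batch = np.zeros((2500, 6))
chunks = np.array_split(dummy_batch, dummy_batch.shape[0] // 1000)
print([c.shape[0] for c in chunks])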
|
test_datagen = ImageDataGenerator(rescale = 1./255 )
|
Digit Recognizer
|
11,953,506 |
def update_group_var(vals):
global group
for i, line in enumerate(vals):
user_id = line[0]
question_id = line[1]
content_type_id = line[2]
ts = get_timestamp(line[4], user_id)
correct = line[6]
user_answer = line[7]
if content_type_id == True:
continue
if st_user_info.get(user_id, -1)== -1:
st_user_info[user_id] = {"timestamp_ms":0}
else:
st_user_info[user_id]["timestamp_ms"] = line[4]
if user_id in group.index:
questions= np.append(group[user_id][0],[question_id])
answers= np.append(group[user_id][1],[correct])
ts= np.append(group[user_id][2],[ts])
user_answer= np.append(group[user_id][3],[user_answer])
group[user_id] =(questions, answers, ts, user_answer)
else:
group[user_id] =(np.array([question_id], dtype=np.int32), np.array([correct], dtype=np.int32), np.array([ts], dtype=np.int32)
,np.array([user_answer], dtype=np.int32))<define_variables>
|
train = train_datagen.flow(X_train, Y_train, batch_size = 128 )
|
Digit Recognizer
|
11,953,506 |
ordinal_enc = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 30: 21, 40: 22, 50: 23, 60: 24, 70: 25, 80: 26, 90: 27, 100: 28, 110: 29, 120: 30, 130: 31, 140: 32, 150: 33, 160: 34, 170: 35, 180: 36, 190: 37, 200: 38, 210: 39, 220: 40, 230: 41, 240: 42, 250: 43, 260: 44, 270: 45, 280: 46, 290: 47, 300: 48}
boundaries = [120,600,1800,3600,10800,43200,86400,259200,604800]
def get_prior(prior):
if prior != prior:
return 0
prior /= 1000
if prior > 20:
prior = np.round(prior, decimals=-1)
return ordinal_enc.get(round(prior),0)
def get_timestamp(ts, user_id):
if st_user_info.get(user_id, -1)== -1:
return 0
diff =(ts - st_user_info[user_id]["timestamp_ms"])/1000
if diff < 0:
return 0
if diff <= 60:
return int(diff)
for i, boundary in enumerate(boundaries):
if boundary > diff:
break
if i == len(boundaries)- 1:
return 60+i+1
return 60+i<define_variables>
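# Worked examples for get_prior (illustrative only): prior elapsed time is in
# milliseconds, values above 20 s are rounded to the nearest 10 s before the
# ordinal lookup, and missing values map to 0.
import numpy as np
print(get_prior(np.nan))
print(get_prior(15_000))
print(get_prior(150_000))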
|
test = test_datagen.flow(X_test, Y_test, batch_size = 128 )
|
Digit Recognizer
|
11,953,506 |
prior_part_mean_dict = {1: 22166.159642501425,
2: 18714.69673913695,
3: 23620.317746179924,
4: 23762.753651169547,
5: 25094.620302855932,
6: 32417.37918735745,
7: 47444.16407400242}<load_pretrained>
|
callback = EarlyStopping(monitor='loss', patience=8, restore_best_weights=True )
|
Digit Recognizer
|
11,953,506 |
with open('.. /input/lgbm-test/repeated_que_count', 'rb')as handle:
repeated_que_count = pickle.load(handle)
with open('.. /input/lgbm-test/user_info', 'rb')as handle:
user_info = pickle.load(handle)
with open('.. /input/lgbm-test/watched_tags', 'rb')as handle:
watched_tags = pickle.load(handle)
with open('.. /input/lgbm-test/containers_mean', 'rb')as handle:
containers_mean = pickle.load(handle)
with open('.. /input/lgbm-test/hardest', 'rb')as handle:
hard_questions = pickle.load(handle)
with open('.. /input/lgbm-test/easiest', 'rb')as handle:
easy_questions = pickle.load(handle)
with open('.. /input/lgbm-test/que_2_k', 'rb')as handle:
que_2_k = pickle.load(handle)
gc.collect()
<feature_engineering>
|
cnn = tf.keras.models.Sequential()
|
Digit Recognizer
|
11,953,506 |
for u in user_info:
user_info[u]["count_2"] = user_info[u]["count"]
user_info[u]["part_count_2"] = user_info[u]["part_count"].copy()
user_info[u]["last_part"] = 1<init_hyperparams>
|
cnn.add(tf.keras.layers.Conv2D(filters = 32, kernel_size = 5, padding = 'same', activation = 'relu', input_shape = [28, 28, 1]))
|
Digit Recognizer
|
11,953,506 |
groups = pd.read_pickle(".. /input/lgbm-test/groups")
def numpy_ewma_vectorized_v2(data, window):
alpha = 2 /(window + 1.0)
alpha_rev = 1-alpha
n = data.shape[0]
pows = alpha_rev**(np.arange(n+1))
scale_arr = 1/pows[:-1]
offset = data[0]*pows[1:]
pw0 = alpha*alpha_rev**(n-1)
mult = data*pw0*scale_arr
cumsums = mult.cumsum()
out = offset + cumsums*scale_arr[::-1]
return out<categorify>
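# Sanity check (illustrative, not from the original notebook): this vectorised
# EWMA is intended to reproduce pandas' recursive EWMA with adjust=False.
import numpy as np
import pandas as pd
seq = np.array([1.0, 0.0, 1.0, 1.0, 0.0, 1.0])
fast = numpy_ewma_vectorized_v2(seq, window=5)
ref = pd.Series(seq).ewm(span=5, adjust=False).mean().values
print(np.allclose(fast, ref))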
|
cnn.add(tf.keras.layers.MaxPool2D(pool_size = 2, strides = 2, padding = 'valid'))
|
Digit Recognizer
|
11,953,506 |
features_1_path = '.. /input/get-features-1/'
que_data = pd.read_pickle(features_1_path + "que_data.pickle")
questions = que_data.drop(columns=["options_number","correctness_number", "correct_answer","tags6","tags5", "tags4"] ).to_dict("index")
questions1 = que_data[["tags1", "tags2", "tags3","tags4","tags5", "tags6"]].to_dict("index")
parts = que_data.part.to_dict()<load_from_csv>
|
cnn.add(tf.keras.layers.Conv2D(filters = 64, kernel_size = 3, padding = 'same'))
cnn.add(tf.keras.layers.MaxPool2D(pool_size = 2, strides = 2, padding='valid'))
|
Digit Recognizer
|
11,953,506 |
lec_data = pd.read_csv(".. /input/riiid-test-answer-prediction/lectures.csv")
lec_dict = lec_data[["lecture_id", "tag"]].set_index("lecture_id" ).tag.to_dict()
features = [
'task_container_id', "ts_diff_shifted", "watched","ts_diff_shifted_2",
'content_id', "k", "k_acc", "el_avg", "wut",
'prior_question_elapsed_time', "time_diff2", "rolling_mean_5", "rolling_mean_10", "rolling_mean_15", "prior_question_had_explanation_u_part_avg",
'prior_question_had_explanation', "hard_ratio_opp", "easy_ratio_opp", "correct_recency", "prior_question_elapsed_time_u_part_avg", "ewm_mean_10", "rolling_mean_5_prior_question",
'last_lecture', "part_mean", "opp_mean", "mean_pause", "timestamp", "prior_part_mean",
"container_mean", "lecs_per", "hard_ratio", "easy_ratio",
'que_count_user', 'question_repeated', "rolling_mean","time_diff3", "time_diff4",
'user_mean', "time_diff1", "time_diff", "sessions", "session_count", "prior_question_had_explanation_ratio"
] + que_data.columns.tolist() [:-1]
features.remove("options_number")
features.remove("correct_answer")
features.remove("tags6")
features.remove("tags5")
features.remove("tags4")
test_cols = ['row_id','timestamp','user_id','content_id','content_type_id','task_container_id','prior_question_elapsed_time',
'prior_question_had_explanation','prior_group_answers_correct','prior_group_responses']
<define_variables>
|
cnn.add(tf.keras.layers.MaxPool2D(pool_size = 2, strides = 2, padding='valid'))
cnn.add(tf.keras.layers.Dropout(0.5))
|
Digit Recognizer
|
11,953,506 |
stack_features = [
'task_container_id', "ts_diff_shifted", "watched","ts_diff_shifted_2",
'content_id', "k", "k_acc", "el_avg", "wut", "lgb_preds", "st_preds",
'prior_question_elapsed_time', "time_diff2", "rolling_mean_5", "rolling_mean_10", "rolling_mean_15", "prior_question_had_explanation_u_part_avg",
'prior_question_had_explanation', "hard_ratio_opp", "easy_ratio_opp", "correct_recency", "prior_question_elapsed_time_u_part_avg", "ewm_mean_10", "rolling_mean_5_prior_question",
'last_lecture', "part_mean", "opp_mean", "mean_pause", "timestamp", "prior_part_mean",
"container_mean", "lecs_per", "hard_ratio", "easy_ratio",
'que_count_user', 'question_repeated', "rolling_mean","time_diff3", "time_diff4",
'user_mean', "time_diff1", "time_diff", "sessions", "session_count", "prior_question_had_explanation_ratio"
] + que_data.columns.tolist() [:-1]
stack_features.remove("options_number")
stack_features.remove("correct_answer")
stack_features.remove("tags6")
stack_features.remove("tags5")
stack_features.remove("tags4")
lgb_preds_idx = stack_features.index("lgb_preds")
st_preds_idx = stack_features.index("st_preds" )<define_variables>
|
cnn.add(tf.keras.layers.Flatten() )
|
Digit Recognizer
|
11,953,506 |
k_size = 20
cols = {test_cols[k]:k for k in range(len(test_cols)) }
features_dict = {features[k]:k for k in range(len(features)) }<init_hyperparams>
|
cnn.add(tf.keras.layers.Dense(units=256, activation='relu'))
|
Digit Recognizer
|
11,953,506 |
new_user = {'count': 0, 'mean_acc':0.5, 'correct_count': 0, 'last_lec':0, 'tmp':0,"first_timestamp":0, "second_timestamp":0,
"third_timestamp":0, "fourth_timestamp":0, "fifth_timestamp":0, "lecs_n":0,"interaction_n":0, "ts_diff_shifted":0.,
"part_corr":np.zeros(( 7), dtype=np.uint16), "part_count":np.zeros(( 7), dtype=np.uint16), "hard_ct":0, "hard_cr":0, "easy_ct":0, "easy_cr":0,
"sessions":0, "session_count":0, "sum_pauses":0., "had_exp":0, "el_sum":0, "part_et": np.zeros(( 7), dtype=np.float),
"part_explan": np.zeros(( 7), dtype=np.uint16), "k_count": np.zeros(( k_size), dtype=np.uint16), "k_corr": np.zeros(( k_size), dtype=np.uint16),
"recent_corr":0, "priors_5": [], "ts_diff_shifted_2":0., "count_2":0, "part_count_2":np.zeros(( 7), dtype=np.uint16), "last_part":1}<define_variables>
|
cnn.add(tf.keras.layers.Dense(units=10, activation='softmax'))
|
Digit Recognizer
|
11,953,506 |
def get_meta_data(data_1):
user_id = data_1[cols['user_id']]
content_type_id = data_1[cols['content_type_id']]
content_id = data_1[cols['content_id']]
prior_group_answers_correct = data_1[cols['prior_group_answers_correct']]
timestamp = data_1[cols['timestamp']]
task_container_id = data_1[cols['task_container_id']]
prior_question_had_explanation = data_1[cols['prior_question_had_explanation']]
elapsed = data_1[cols['prior_question_elapsed_time']]
return user_id, content_type_id, content_id, prior_group_answers_correct, timestamp,task_container_id,prior_question_had_explanation, elapsed<feature_engineering>
|
cnn.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'] )
|
Digit Recognizer
|
11,953,506 |
def add_user(user_id):
user_info[user_id] = copy.deepcopy(new_user)
repeated_que_count[user_id] = {}
groups[user_id] = []<feature_engineering>
|
model = cnn.fit_generator(train, epochs = 100, validation_data = test, callbacks = [callback] )
|
Digit Recognizer
|
11,953,506 |
def update_user_part_acc(user_id, question, answered_correctly, elapsed, explan):
part = parts.get(question, -1)
user_info[user_id]["part_count"][part-1] += 1
user_info[user_id]["part_corr"][part-1] += answered_correctly
if not isinstance(explan, pd._libs.missing.NAType)and explan == explan:
user_info[user_id]["had_exp"] += explan<feature_engineering>
|
cnn.evaluate(X_test,Y_test,verbose=2 )
|
Digit Recognizer
|
11,953,506 |
def update_user(user_id, had_exp, elapsed, content_id ,answered_correctly, timestamp):
user_info[user_id]['count'] += 1
if repeated_que_count[user_id].get(content_id, -1)== -1:
repeated_que_count[user_id][content_id] = 1
else:
repeated_que_count[user_id][content_id] += 1
if answered_correctly:
user_info[user_id]['correct_count'] += 1
user_info[user_id]['mean_acc'] = user_info[user_id]['correct_count']/user_info[user_id]['count']
update_user_part_acc(user_id, content_id, answered_correctly, elapsed, had_exp)
if hard_questions.get(content_id, False):
user_info[user_id]["hard_ct"] += 1
user_info[user_id]["hard_cr"] += answered_correctly
if easy_questions.get(content_id, False):
user_info[user_id]["easy_ct"] += 1
user_info[user_id]["easy_cr"] += answered_correctly
k = que_2_k[content_id]
user_info[user_id]["k_count"][k] += 1
user_info[user_id]["k_corr"][k] += answered_correctly
if answered_correctly:
user_info[user_id]["recent_corr"] = timestamp<feature_engineering>
|
test_data /= 255.0
test_data = test_data.values.reshape(-1,28,28,1)
results = cnn.predict(test_data)
results = pd.Series(results.argmax(axis=1), name='Label')
|
Digit Recognizer
|
11,953,506 |
def update_lec_data(user_id, content_id):
if watched_tags.get(str(user_id), -1)== -1:
watched_tags[str(user_id)] = {}
if user_info.get(user_id, -1)== -1:
add_user(user_id)
user_info[user_id]["lecs_n"] += 1
lec_tag = lec_dict[content_id]
watched_tags[str(user_id)][str(lec_tag)] = 1
user_info[user_id]['last_lec'] = content_id<feature_engineering>
|
submission = pd.concat([pd.Series(range(1,28001),name = 'ImageId'),results],axis = 1 )
|
Digit Recognizer
|
11,953,506 |
def non_lag_update(user_id, timestamp, elapsed, explan, lec):
timestamp = timestamp/8.64e+7
diff_timestamp_1 = timestamp - user_info[user_id]["first_timestamp"]
diff_timestamp_2 = timestamp - user_info[user_id]["second_timestamp"]
diff_timestamp_3 = timestamp - user_info[user_id]["third_timestamp"]
diff_timestamp_4 = timestamp - user_info[user_id]["fourth_timestamp"]
diff_timestamp_5 = timestamp - user_info[user_id]["fifth_timestamp"]
user_info[user_id]["fifth_timestamp"] = user_info[user_id]["fourth_timestamp"]
user_info[user_id]["fourth_timestamp"] = user_info[user_id]["third_timestamp"]
user_info[user_id]["third_timestamp"] = user_info[user_id]["second_timestamp"]
user_info[user_id]["second_timestamp"] = user_info[user_id]["first_timestamp"]
user_info[user_id]["first_timestamp"] = timestamp
if(user_info[user_id]["second_timestamp"] - user_info[user_id]["third_timestamp"])!= 0:
user_info[user_id]["ts_diff_shifted"] = user_info[user_id]["second_timestamp"]*8.64e+7 - user_info[user_id]["third_timestamp"]*8.64e+7
if(user_info[user_id]["second_timestamp"] - user_info[user_id]["fourth_timestamp"])!= 0:
user_info[user_id]["ts_diff_shifted_2"] = user_info[user_id]["second_timestamp"]*8.64e+7 - user_info[user_id]["fourth_timestamp"]*8.64e+7
if not isinstance(explan, pd._libs.missing.NAType)and explan == explan and not lec:
user_info[user_id]["priors_5"].append(explan)
if diff_timestamp_1 > 0.083:
user_info[user_id]["sessions"] += 1
user_info[user_id]["session_count"] = 0
user_info[user_id]["sum_pauses"] += diff_timestamp_1
user_info[user_id]["session_count"] += 1
return diff_timestamp_1, diff_timestamp_2, diff_timestamp_3, diff_timestamp_4, diff_timestamp_5<prepare_x_and_y>
|
submission.to_csv('./submission.csv',index = False )
|
Digit Recognizer
|
11,914,085 |
def update_data(prior_group_answers_correct):
global tmp_data
arr = np.array(ast.literal_eval(prior_group_answers_correct))
for i, line in enumerate(tmp_data):
user_id = line[cols['user_id']]
content_type_id = line[cols['content_type_id']]
content_id = line[cols['content_id']]
timestamp = line[cols['timestamp']]
task_container_id = line[cols['task_container_id']]
explan = line[cols['prior_question_had_explanation']]
if isinstance(explan, pd._libs.missing.NAType)or explan != explan:
explan = 0
elapsed = line[cols['prior_question_elapsed_time']]
if isinstance(elapsed, pd._libs.missing.NAType)or elapsed != elapsed:
elapsed = 0
answered_correctly = arr[i]
user_arr = groups[user_id]
user_arr.insert(len(user_arr), answered_correctly)
groups[user_id] = user_arr
if content_type_id == False:
if user_info.get(user_id, -1)== -1:
add_user(user_id)
update_user(user_id, explan, elapsed, content_id, answered_correctly, timestamp)
tmp_data = []<define_variables>
|
train=pd.read_csv("/kaggle/input/digit-recognizer/train.csv" )
|
Digit Recognizer
|
11,914,085 |
tmp_data = []<define_variables>
|
test=pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
11,914,085 |
def preprocess_1(chunk):
data_1 = chunk.values
out = np.zeros(( data_1.shape[0], len(features)))
batch_counts = data_1[:, [cols["user_id"],cols["content_type_id"]]]
batch_counts = Counter(batch_counts[batch_counts[:, 1] == False][:, 0])
global tmp_data
for i in range(data_1.shape[0]):
user_id, content_type_id, content_id ,prior_group_answers_correct, timestamp, task_container_id, explan, elapsed = get_meta_data(data_1[i])
part = parts.get(content_id, -1)
task_count = batch_counts[user_id]
if prior_group_answers_correct == prior_group_answers_correct and prior_group_answers_correct != '[]':
update_data(prior_group_answers_correct)
tmp_data.append(data_1[i].tolist())
if content_type_id:
update_lec_data(user_id, content_id)
if user_info.get(user_id, -1)== -1:
add_user(user_id)
user_info[user_id]["interaction_n"] += 1
_ = non_lag_update(user_id, timestamp, elapsed, explan, content_type_id)
diff_timestamp_1, diff_timestamp_2, diff_timestamp_3, diff_timestamp_4, diff_timestamp_5 = _
if not content_type_id:
if not isinstance(elapsed, pd._libs.missing.NAType)and elapsed == elapsed:
user_info[user_id]["el_sum"] += elapsed
else:
elapsed = 0
if isinstance(explan, pd._libs.missing.NAType)or explan != explan:
explan = 0
if user_info[user_id]["count_2"] != 0:
last_part = user_info[user_id]["last_part"]
user_info[user_id]["part_et"][last_part-1] += elapsed
user_info[user_id]["part_explan"][last_part-1] += explan
user_info[user_id]["last_part"] = part
user_info[user_id]["part_count_2"][part-1] += 1
user_info[user_id]["count_2"] += 1
out[i, features_dict['content_id']] = content_id
out[i, features_dict['task_container_id']] = data_1[i, cols['task_container_id']]
out[i, features_dict['prior_question_elapsed_time']] = elapsed
out[i, features_dict['el_avg']] =(user_info[user_id]["el_sum"]/(user_info[user_id]["count_2"])) /1000
out[i, features_dict['prior_question_elapsed_time_u_part_avg']] =(user_info[user_id]["part_et"][part-1])/(user_info[user_id]["part_count_2"][part-1])
out[i, features_dict['prior_question_had_explanation']] = explan
out[i, features_dict['prior_question_had_explanation_u_part_avg']] = user_info[user_id]["part_explan"][part-1]/(user_info[user_id]["part_count_2"][part-1]+1)
out[i, features_dict['bundle_id']:] = np.array(list(questions.get(content_id ).values()))
out[i, features_dict['que_count_user']] = user_info.get(user_id, {} ).get('count',0)
out[i, features_dict['question_repeated']] = repeated_que_count.get(user_id, {} ).get(content_id, 0)+ 1
m = user_info.get(user_id, {} ).get('mean_acc', 0)
if m == 0:
out[i, features_dict['user_mean']] = 0.55
else:
out[i, features_dict['user_mean']] = m
out[i, features_dict['opp_mean']] = 1 - out[i, features_dict['user_mean']]
out[i, features_dict['last_lecture']] = user_info.get(user_id, {} ).get('last_lec',0)
out[i, features_dict['time_diff']] = diff_timestamp_1
out[i, features_dict['time_diff1']] = diff_timestamp_2
out[i, features_dict['time_diff2']] = diff_timestamp_3
out[i, features_dict['time_diff3']] = diff_timestamp_4
out[i, features_dict['time_diff4']] = diff_timestamp_5
out[i, features_dict['timestamp']] = timestamp/8.64e+7
out[i, features_dict['correct_recency']] =(timestamp - user_info[user_id]["recent_corr"])/8.64e+7
kk = user_info[user_id]["priors_5"][-5:]
if len(kk)!= 0:
out[i, features_dict['rolling_mean_5_prior_question']] = np.array(kk ).mean()
out[i, features_dict['ts_diff_shifted']] = user_info[user_id]["ts_diff_shifted"]
out[i, features_dict['ts_diff_shifted_2']] = user_info[user_id]["ts_diff_shifted_2"]
out[i, features_dict['container_mean']] = containers_mean[task_container_id]
out[i, features_dict['lecs_per']] = user_info[user_id]["lecs_n"]/user_info[user_id]["interaction_n"]*100
out[i, features_dict['sessions']] = user_info[user_id]["sessions"]
out[i, features_dict['session_count']] = user_info[user_id]["session_count"]
if user_info[user_id]["count"] != 0:
out[i, features_dict['prior_question_had_explanation_ratio']] = user_info[user_id]["had_exp"]/user_info[user_id]["count"]
if user_info[user_id]["sessions"] != 0:
out[i, features_dict['mean_pause']] = user_info[user_id]["sum_pauses"]/user_info[user_id]["sessions"]
nn = user_info.get(user_id, {} ).get('easy_ct',0)
if nn != 0:
out[i, features_dict['easy_ratio']] = user_info.get(user_id, {} ).get('easy_cr',0)/nn
out[i, features_dict['easy_ratio_opp']] = 1 - out[i, features_dict['easy_ratio']]
nn = user_info.get(user_id, {} ).get('hard_ct',0)
if nn != 0:
out[i, features_dict['hard_ratio']] = user_info.get(user_id, {} ).get('hard_cr',0)/nn
out[i, features_dict['hard_ratio_opp']] = 1 - out[i, features_dict['hard_ratio']]
nn = user_info.get(user_id, {} ).get('part_count',[])
if nn != []:
part = int(out[i, features_dict['part']] - 1)
ct = nn[part]
cr = user_info[user_id]['part_corr'][part]
if ct != 0:
out[i, features_dict['part_mean']] = cr/ct
if groups.get(user_id, -1)!= -1 and groups[user_id] != []:
last_arr = np.array(groups[user_id][-40:])
out[i, features_dict['rolling_mean']] = numpy_ewma_vectorized_v2(np.array(last_arr[-30:]),5)[-1]
last_arr = last_arr[last_arr != -1]
out[i, features_dict['ewm_mean_10']] = numpy_ewma_vectorized_v2(np.array(last_arr[-30:]),5)[-1]
out[i, features_dict['rolling_mean_10']] = np.array(last_arr[-10:] ).mean()
out[i, features_dict['rolling_mean_15']] = np.array(last_arr[-15:] ).mean()
out[i, features_dict['rolling_mean_5']] = np.array(last_arr[-5:] ).mean()
out[i, features_dict['prior_part_mean']] = prior_part_mean_dict[int(out[i, features_dict['part']])]
k = que_2_k[content_id]
out[i, features_dict['k']] = k
if user_info[user_id]["k_count"][k] != 0:
out[i, features_dict['k_acc']] = user_info[user_id]["k_corr"][k]/user_info[user_id]["k_count"][k]
out[i, features_dict['wut']] = user_info[user_id]["ts_diff_shifted"] - elapsed*task_count
if out[i, features_dict['wut']] < 0:
out[i, features_dict['wut']] = 0
usr = watched_tags.get(str(user_id), -1)
if usr != -1:
nn = 0
for k in range(6):
nn += usr.get(str(int(questions1[content_id]["tags"+str(k+1)])) , 0)
out[i, features_dict['watched']] = nn
return out
<define_variables>
|
label=train["label"].values
|
Digit Recognizer
|
11,914,085 |
model_lgbm = lgb.Booster(model_file='.. /input/lgbm-test/lgb_classifier.txt')
stack_lgbm = lgb.Booster(model_file='.. /input/lgbm-test/lgb_stack.txt' )<define_variables>
|
train.drop("label",axis=1,inplace=True )
|
Digit Recognizer
|
11,914,085 |
vals = 0<split>
|
t=train.values
ttest=test.values
|
Digit Recognizer
|
11,914,085 |
%%time
for(test_data,sample_prediction_df)in iter_test:
if not isinstance(vals, int):
if test_data.iloc[0].prior_group_answers_correct == test_data.iloc[0].prior_group_answers_correct:
past_vals = np.array(ast.literal_eval(test_data.iloc[0].prior_group_answers_correct))
past_answers = np.array(ast.literal_eval(test_data.iloc[0].prior_group_responses))
past_vals = np.concatenate(( vals, past_vals.reshape(len(past_vals),1)) , axis=1)
past_vals = np.concatenate(( past_vals, past_answers.reshape(len(past_answers),1)) , axis=1)
update_group_var(past_vals)
vals = test_data[["user_id","content_id", "content_type_id", "task_container_id","timestamp","prior_question_elapsed_time"]].values
test_transform = preprocess_1(test_data)
lgbm_predic = model_lgbm.predict(test_transform)
st_predic = np.array(split_preds(vals))
test_data['answered_correctly'] =(lgbm_predic*0.85 + 1.15*st_predic)/2
env.predict(test_data.loc[test_data['content_type_id'] == 0, ['row_id', 'answered_correctly']] )<categorify>
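# Illustrative note (not part of the original notebook): the blend above,
# (lgbm_predic*0.85 + 1.15*st_predic)/2, is simply a weighted average with
# weights 0.425 (LightGBM) and 0.575 (transformer); lgb_p and st_p below are
# made-up probabilities.
lgb_p, st_p = 0.60, 0.80
blended = (lgb_p * 0.85 + 1.15 * st_p) / 2
assert abs(blended - (0.425 * lgb_p + 0.575 * st_p)) < 1e-12
print(blended)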
|
t=t.astype('float32')
ttest=ttest.astype('float32' )
|
Digit Recognizer
|
11,914,085 |
dicts_path = '/kaggle/input/agg-riiid/agg_riiid/'
user_content_id_agg = pd.read_pickle(dicts_path + 'user_content_id_agg.pkl.gzip')
user_content_id_agg['count'] = user_content_id_agg['count'].astype('int16' )<data_type_conversions>
|
t/=255
ttest/=255
|
Digit Recognizer
|
11,914,085 |
user_content_id_count_dict = user_content_id_agg['count'].astype('int16' ).to_dict(defaultdict(int))<set_options>
|
tl = keras.utils.to_categorical(label, 10)
|
Digit Recognizer
|
11,914,085 |
del user_content_id_agg
gc.collect()<import_modules>
|
input_shape =(28, 28, 1 )
|
Digit Recognizer
|
11,914,085 |
import numpy as np
import lightgbm as lgb
import pickle
import riiideducation
import joblib<define_variables>
|
model = Sequential()
model.add(Conv2D(32,(3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=input_shape))
model.add(MaxPooling2D(( 2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64,(3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPooling2D(( 2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128,(3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPooling2D(( 2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256,(3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(MaxPooling2D(( 2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
11,914,085 |
target = 'answered_correctly'<define_variables>
|
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'] )
|
Digit Recognizer
|
11,914,085 |
features = [
'content_id',
'prior_question_elapsed_time',
'prior_question_had_explanation',
'user_correctness',
'user_correctness_window_10_mean',
'part',
'content_count',
'content_sum',
'content_id_correctness_total',
'repeated_times',
'user_count_questions',
'explanation_mean_user',
'timestamp',
'timestamp_diff_last',
'user_count_lectures',
'last_lecture_tag',
'prior_questions_mean_time',
'last_correct',
'task_container_id',
'first_tag',
'second_tag',
'repeated_times_tag',
'user_tag_correctness',
'timestamp_diff_last_window_10_mean',
'user_count_lectures_mean'
]<categorify>
|
np.random.seed(1234)
(x_train,x_test,y_train,y_test)= train_test_split(t,tl, train_size=0.75, random_state=1 )
|
Digit Recognizer
|
11,914,085 |
dicts_path = '/kaggle/input/agg-riiid/agg_riiid/'
user_agg = pd.read_pickle(dicts_path + 'user_agg.pkl.gzip')
user_sum_dict = user_agg['sum'].astype('int32' ).to_dict(defaultdict(int))
user_count_dict = user_agg['count'].astype('int32' ).to_dict(defaultdict(int))
del user_agg
gc.collect()
content_agg = pd.read_pickle(dicts_path + 'content_agg.pkl.gzip')
content_sum_dict = content_agg['sum'].astype('int32' ).to_dict(defaultdict(int))
content_count_dict = content_agg['count'].astype('int32' ).to_dict(defaultdict(int))
del content_agg
gc.collect()
user_target_window_10 = pd.read_pickle(dicts_path + 'user_target_window_10.pkl.gzip')
user_target_window_10_dict = user_target_window_10.astype('int8' ).to_dict(defaultdict(int))
del user_target_window_10
gc.collect()
user_target_next_out_window_10_dict = defaultdict(int)
timestamp_diff_window_10 = pd.read_pickle(dicts_path + 'timestamp_diff_window_10.pkl.gzip')
timestamp_diff_window_10_dict = timestamp_diff_window_10.astype('int32' ).to_dict(defaultdict(int))
del timestamp_diff_window_10
gc.collect()
timestamp_diff_next_window_10_dict = defaultdict(int)
explanation_agg = pd.read_pickle(dicts_path + 'explanation_agg.pkl.gzip')
explanation_sum_dict = explanation_agg['sum'].astype('int32' ).to_dict(defaultdict(int))
explanation_count_dict = explanation_agg['count'].astype('int32' ).to_dict(defaultdict(int))
del explanation_agg
gc.collect()
user_lectures_agg = pd.read_pickle(dicts_path + 'user_lectures_agg.pkl.gzip')
user_lectures_count_dict = user_lectures_agg['sum'].astype('int16' ).to_dict(defaultdict(int))
del user_lectures_agg
gc.collect()
last_lecture_agg = pd.read_pickle(dicts_path + 'last_lecture_agg.pkl.gzip')
last_lecture_tag_dict = last_lecture_agg.astype('int16' ).to_dict(defaultdict(int))
del last_lecture_agg
gc.collect()
prior_questions_time_agg = pd.read_pickle(dicts_path + 'prior_questions_time_agg.pkl.gzip')
prior_questions_time_sum_dict = prior_questions_time_agg['sum'].astype('int32' ).to_dict(defaultdict(float))
prior_questions_time_count_dict = prior_questions_time_agg['count'].astype('int32' ).to_dict(defaultdict(int))
del prior_questions_time_agg
gc.collect()
last_correct_agg = pd.read_pickle(dicts_path + 'last_correct_agg.pkl.gzip')
last_correct_dict = last_correct_agg.astype('int8' ).to_dict(defaultdict(int))
del last_correct_agg
gc.collect()
user_tag_agg = pd.read_pickle(dicts_path + 'user_tag_agg.pkl.gzip')
user_tag_agg_sum_dict = user_tag_agg['sum'].astype('int16' ).to_dict(defaultdict(int))
user_tag_agg_count_dict = user_tag_agg['count'].astype('int16' ).to_dict(defaultdict(int))
del user_tag_agg
gc.collect()
last_timestamp_agg = pd.read_pickle(dicts_path + 'last_timestamp_agg.pkl.gzip')
last_timestamp_dict = last_timestamp_agg.astype('int32' ).to_dict(defaultdict(int))
del last_timestamp_agg
gc.collect()<load_pretrained>
|
model.fit(x_train, y_train,
batch_size=100,
epochs=400,
verbose=2,
validation_data=(x_test, y_test))
|
Digit Recognizer
|
11,914,085 |
model_path = '/kaggle/input/trained-model/'
file = model_path + 'trained_model.pkl'
model = pickle.load(open(file, 'rb'))
print('Trained LGB model was loaded!' )<load_from_csv>
|
y_pred=model.predict(ttest,verbose=0 )
|
Digit Recognizer
|
11,914,085 |
home_path = '/kaggle/input/riiid-test-answer-prediction/'
questions_df = pd.read_csv(home_path + 'questions.csv',
usecols=[0, 3, 4],
dtype={'question_id': 'int16', 'part': 'int8'}
)<data_type_conversions>
|
sample=pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv' )
|
Digit Recognizer
|
11,914,085 |
questions_df['tags'].fillna('92', inplace=True)
questions_df['first_tag'] = questions_df['tags'].apply(lambda x: x.split() [0])
questions_df['first_tag'] = questions_df['first_tag'].astype('int16')
questions_df['second_tag'] = questions_df['tags'].apply(lambda x: x.split() [1] if len(x.split())> 1 else -1)
questions_df['second_tag'] = questions_df['second_tag'].astype('int16')
questions_df.drop('tags',axis=1,inplace=True )<load_from_csv>
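# Toy illustration (not from the competition files): the two .apply() calls
# above split the space-separated "tags" string into a first and optional
# second tag, with -1 when only one tag is present.
import pandas as pd
toy = pd.DataFrame({'tags': ['51 131 162', '8']})
toy['first_tag'] = toy['tags'].apply(lambda x: x.split()[0]).astype('int16')
toy['second_tag'] = toy['tags'].apply(lambda x: x.split()[1] if len(x.split()) > 1 else -1).astype('int16')
print(toy[['first_tag', 'second_tag']].values.tolist())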
|
pred = np.argmax(y_pred, axis = 1 )
|
Digit Recognizer
|
11,914,085 |
lectures_df = pd.read_csv(home_path + 'lectures.csv',
dtype={'tag': 'int16', 'part': 'int8'}
)
type_of_dict = {'intention': 1, 'concept': 2, 'solving question': 3, 'starter': 4}
lectures_df['type_of'] = lectures_df['type_of'].map(type_of_dict )<import_modules>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),pd.Series(pred, name = "Label")],axis = 1 )
|
Digit Recognizer
|
11,914,085 |
import random
from tqdm.notebook import tqdm
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader<load_pretrained>
|
submission.to_csv("mnist4.csv",index=False )
|
Digit Recognizer
|
11,650,794 |
skills = joblib.load("/kaggle/input/skills-pkl/skills.pkl.zip")
n_skill = len(skills)
group = joblib.load("/kaggle/input/group-pkl/group.pkl.zip")
del joblib
gc.collect()<define_variables>
|
np.random.seed(1)
|
Digit Recognizer
|
11,650,794 |
MAX_SEQ = 180
ACCEPTED_USER_CONTENT_SIZE = 4
EMBED_SIZE = 128
BATCH_SIZE = 64
DROPOUT = 0.1<choose_model_class>
|
X_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
X_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print('Shape of the training data: ', X_train.shape)
print('Shape of the test data: ', X_test.shape )
|
Digit Recognizer
|
11,650,794 |
class FFN(nn.Module):
def __init__(self, state_size = 200, forward_expansion = 1, bn_size=MAX_SEQ - 1, dropout=0.2):
super(FFN, self ).__init__()
self.state_size = state_size
self.lr1 = nn.Linear(state_size, forward_expansion * state_size)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm1d(bn_size)
self.lr2 = nn.Linear(forward_expansion * state_size, state_size)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.relu(self.lr1(x))
x = self.bn(x)
x = self.lr2(x)
return self.dropout(x)
FFN()<categorify>
|
y_train = X_train['label']
X_train.drop(labels = ['label'], axis=1, inplace=True )
|
Digit Recognizer
|
11,650,794 |
def future_mask(seq_length):
future_mask =(np.triu(np.ones([seq_length, seq_length]), k = 1)).astype('bool')
return torch.from_numpy(future_mask)
future_mask(5 )<choose_model_class>
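# Quick illustrative check (not from the original notebook): for seq_length=4
# the mask is True strictly above the diagonal, so position t cannot attend to
# later positions when this is passed as attn_mask to nn.MultiheadAttention.
print(future_mask(4))
# tensor([[False,  True,  True,  True],
#         [False, False,  True,  True],
#         [False, False, False,  True],
#         [False, False, False, False]])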
|
y_train = to_categorical(y_train, num_classes=10)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1 )
|
Digit Recognizer
|
11,650,794 |
class TransformerBlock(nn.Module):
def __init__(self, embed_dim, heads = 8, dropout = DROPOUT, forward_expansion = 1):
super(TransformerBlock, self ).__init__()
self.multi_att = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=heads, dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.layer_normal = nn.LayerNorm(embed_dim)
self.ffn = FFN(embed_dim, forward_expansion = forward_expansion, dropout=dropout)
self.layer_normal_2 = nn.LayerNorm(embed_dim)
def forward(self, value, key, query, att_mask):
att_output, att_weight = self.multi_att(value, key, query, attn_mask=att_mask)
att_output = self.dropout(self.layer_normal(att_output + value))
att_output = att_output.permute(1, 0, 2)
x = self.ffn(att_output)
x = self.dropout(self.layer_normal_2(x + att_output))
return x.squeeze(-1), att_weight
class Encoder(nn.Module):
def __init__(self, n_skill, max_seq=100, embed_dim=128, dropout = DROPOUT, forward_expansion = 1, num_layers=1, heads = 8):
super(Encoder, self ).__init__()
self.n_skill, self.embed_dim = n_skill, embed_dim
self.embedding = nn.Embedding(2 * n_skill + 1, embed_dim)
self.pos_embedding = nn.Embedding(max_seq - 1, embed_dim)
self.e_embedding = nn.Embedding(n_skill+1, embed_dim)
self.layers = nn.ModuleList([TransformerBlock(embed_dim, forward_expansion = forward_expansion)for _ in range(num_layers)])
self.dropout = nn.Dropout(dropout)
def forward(self, x, question_ids):
device = x.device
x = self.embedding(x)
pos_id = torch.arange(x.size(1)).unsqueeze(0 ).to(device)
pos_x = self.pos_embedding(pos_id)
x = self.dropout(x + pos_x)
x = x.permute(1, 0, 2)
e = self.e_embedding(question_ids)
e = e.permute(1, 0, 2)
for layer in self.layers:
att_mask = future_mask(e.size(0)).to(device)
x, att_weight = layer(e, x, x, att_mask=att_mask)
x = x.permute(1, 0, 2)
x = x.permute(1, 0, 2)
return x, att_weight
class SAKTModel(nn.Module):
def __init__(self, n_skill, max_seq=100, embed_dim=128, dropout = DROPOUT, forward_expansion = 1, enc_layers=1, heads = 8):
super(SAKTModel, self ).__init__()
self.encoder = Encoder(n_skill, max_seq, embed_dim, dropout, forward_expansion, num_layers=enc_layers)
self.pred = nn.Linear(embed_dim, 1)
def forward(self, x, question_ids):
x, att_weight = self.encoder(x, question_ids)
x = self.pred(x)
return x.squeeze(-1), att_weight<set_options>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 256, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
model.summary()
|
Digit Recognizer
|