kernel_id (int64, values 24.2k to 23.3M) | prompt (string, length 8 to 1.85M) | completetion (string, length 1 to 182k) | comp_name (string, length 5 to 57)
---|---|---|---|
10,296,992 |
y = "revenue"
x = h2o_df.columns
x.remove(y )<choose_model_class>
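# The <choose_model_class> step is not shown in this snippet; a minimal sketch of the
# H2O AutoML call this setup typically feeds into (max_models and seed are assumptions):
from h2o.automl import H2OAutoML
aml = H2OAutoML(max_models=20, seed=1)
aml.train(x=x, y=y, training_frame=h2o_df)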
|
callback_lrs = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
epochs = 40
history = [0]*10
for j in range(10):
X_train1, X_valid1, y_train1, y_valid1 = train_test_split(X_train, y_train, test_size = 0.1)
history[j] = model[j].fit_generator(datagen.flow(X_train1, y_train1, batch_size = batch_size), epochs = epochs, verbose = 0,
validation_data =(X_valid1, y_valid1), callbacks = [callback_lrs])
print('CNN:', j+1, 'Epochs =', epochs, 'Train accuracy:', max(history[j].history['accuracy']), 'Validation accuracy:', max(history[j].history['val_accuracy']))
|
Digit Recognizer
|
10,296,992 |
<train_model><EOS>
|
results = np.zeros(( X_test.shape[0],10))
for j in range(10):
results = results + model[j].predict(X_test)
y_test_class = np.argmax(results, axis = 1)
submission = pd.DataFrame({'ImageId': list(range(1, len(y_test_class)+1)) , 'Label': np.array(y_test_class)})
submission.to_csv('submission.csv', index=False)
print(submission )
|
Digit Recognizer
|
7,206,401 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<predict_on_test>
|
%matplotlib inline
|
Digit Recognizer
|
7,206,401 |
pred = aml.predict(h2o_valid)
pred.head()<save_model>
|
data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
print(data.shape )
|
Digit Recognizer
|
7,206,401 |
h2o.save_model(aml.leader, path="./model_bin" )<train_model>
|
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print(test_data.shape )
|
Digit Recognizer
|
7,206,401 |
params = {'objective': 'reg:linear',
'eta': 0.01,
'max_depth': 6,
'min_child_weight': 3,
'subsample': 0.8,
'colsample_bytree': 0.8,
'colsample_bylevel': 0.50,
'gamma': 1.45,
'eval_metric': 'rmse',
'seed': 12,
'silent': True}
xgb_data = [(xgb.DMatrix(X_train, y_train), 'train'),(xgb.DMatrix(X_valid, y_valid), 'valid')]
print('Starting training...')
xgb_model = xgb.train(params,
xgb.DMatrix(X_train, y_train),
10000,
xgb_data,
verbose_eval=300,
early_stopping_rounds=300 )<predict_on_test>
|
sample_submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
print(sample_submission.shape )
|
Digit Recognizer
|
7,206,401 |
xgb_pred = xgb_model.predict(xgb.DMatrix(X_valid))<drop_column>
|
encoder = OneHotEncoder(sparse=False,categories='auto')
yy = [[0],[1],[2],[3],[4],[5],[6],[7],[8],[9]]
encoder.fit(yy)
train_label = train_label.reshape(-1,1)
val_label = val_label.reshape(-1,1)
train_label = encoder.transform(train_label)
val_label = encoder.transform(val_label)
print('train_label shape: %s'%str(train_label.shape))
print('val_label shape: %s'%str(val_label.shape))
|
Digit Recognizer
|
7,206,401 |
X_test = test.drop('revenue',axis=1 )<prepare_x_and_y>
|
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.layers.normalization import BatchNormalization
from keras.layers import LeakyReLU
|
Digit Recognizer
|
7,206,401 |
X_test[X_test==np.inf]=np.nan
X_test.fillna(X_test.mean() , inplace=True )<predict_on_test>
|
model = Sequential()
model.add(Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1),padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(32,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(128, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(256, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu', name='my_dense'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
|
Digit Recognizer
|
7,206,401 |
test_pred_xgb = xgb_model.predict(xgb.DMatrix(( X_test)) , ntree_limit=xgb_model.best_ntree_limit )<choose_model_class>
|
datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range = 15,
horizontal_flip = False,
zoom_range = 0.20 )
|
Digit Recognizer
|
7,206,401 |
model = CatBoostRegressor(iterations=100000,
learning_rate=0.005,
depth=5,
eval_metric='RMSE',
colsample_bylevel=0.8,
random_seed = 21,
bagging_temperature = 0.2,
metric_period = None,
early_stopping_rounds=200
)
model.fit(X_train, y_train,eval_set=(X_valid, y_valid),use_best_model=True,verbose=500)
val_pred = model.predict(X_valid)
print('RMSE',np.sqrt(mean_squared_error(val_pred,y_valid)))
test_pred_cat = model.predict(X_test )<init_hyperparams>
|
model.compile(loss='categorical_crossentropy',optimizer=Adam() ,metrics=['accuracy'])
datagen.fit(train_image)
history = model.fit_generator(datagen.flow(train_image,train_label, batch_size=32),
epochs = 75,
shuffle=True,
validation_data =(val_image,val_label),
verbose = 1,
steps_per_epoch=train_image.shape[0] // 32 )
|
Digit Recognizer
|
7,206,401 |
params = {'objective':'regression',
'num_leaves' : 30,
'min_data_in_leaf' : 20,
'max_depth' : 9,
'learning_rate': 0.004,
'feature_fraction':0.9,
"bagging_freq": 1,
"bagging_fraction": 0.9,
'lambda_l1': 0.2,
"bagging_seed": 11,
"metric": 'rmse',
"random_state" : 11,
"verbosity": -1}
record = dict()
model = lgb.train(params
, lgb.Dataset(X_train, y_train)
, num_boost_round = 100000
, valid_sets = [lgb.Dataset(X_valid, y_valid)]
, verbose_eval = 500
, early_stopping_rounds = 500
, callbacks = [lgb.record_evaluation(record)]
)
best_idx = np.argmin(np.array(record['valid_0']['rmse']))
val_pred = model.predict(X_valid, num_iteration = model.best_iteration)
test_pred_gbm = model.predict(X_test, num_iteration = model.best_iteration )<save_to_csv>
|
intermediate_output = intermediate_layer_model.predict(train_image)
intermediate_output = pd.DataFrame(data=intermediate_output )
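# intermediate_layer_model is not defined in these snippets; a minimal sketch, assuming it
# extracts the output of the Dense layer named 'my_dense' from the CNN defined above:
from keras.models import Model
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer('my_dense').output)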
|
Digit Recognizer
|
7,206,401 |
sub = pd.read_csv('../input/tmdb-box-office-prediction/sample_submission.csv')
df_sub = pd.DataFrame()
df_sub['id'] = sub['id']
final_pred = 0.3*test_pred_xgb + 0.7*test_pred_cat
df_sub['revenue'] = np.expm1(final_pred)
print(df_sub['revenue'])
df_sub.to_csv("submission.csv", index=False )<import_modules>
|
val_data = intermediate_output[40000:]
|
Digit Recognizer
|
7,206,401 |
confusion_matrix)
<define_variables>
|
submission_cnn = model.predict(test_image )
|
Digit Recognizer
|
7,206,401 |
PRETRAINED_MODEL_NAME = 'bert-base-uncased'
LABELS_NUMBER = 2
MAX_LENGHT = 512
BATCH_SIZE = 6
LEARNING_RATE = 2e-5
EPOCHS_NUMBER = 1
N_PREDICTIONS_TO_SHOW = 10<load_from_csv>
|
intermediate_test_output = intermediate_layer_model.predict(test_image)
intermediate_test_output = pd.DataFrame(data=intermediate_test_output )
|
Digit Recognizer
|
7,206,401 |
train_data = pd.read_csv('../input/nlp-getting-started/train.csv')
print(train_data.shape)
train_data.head(3 )<load_from_csv>
|
xgbmodel = XGBClassifier(objective='multi:softprob',
num_class= 10)
xgbmodel.fit(intermediate_output, train_label1)
xgbmodel.score(val_data, val_label1 )
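# train_label1 and val_label1 are not defined in these snippets; presumably they are the
# integer class labels, recoverable from the one-hot encoded arrays built earlier, e.g.:
train_label1 = np.argmax(train_label, axis=1)
val_label1 = np.argmax(val_label, axis=1)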
|
Digit Recognizer
|
7,206,401 |
test_data = pd.read_csv('../input/nlp-getting-started/test.csv')
print(test_data.shape)
test_data.head(3 )<load_pretrained>
|
submission_xgb = xgbmodel.predict(intermediate_test_output )
|
Digit Recognizer
|
7,206,401 |
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME,
do_lower_case=True )<string_transform>
|
submission_cnn = submission_cnn.astype(int)
submission_xgb = submission_xgb.astype(int)
|
Digit Recognizer
|
7,206,401 |
vocabulary = tokenizer.get_vocab()
print(f'Size of the vocabulary: {len(vocabulary)}')
print(f'Some tokens of the vocabulary: {list(vocabulary.keys())[5000:5010]}' )<categorify>
|
submission_cnn
label = np.argmax(submission_cnn,1)
id_ = np.arange(0,label.shape[0])
label
|
Digit Recognizer
|
7,206,401 |
def prepare_sequence(text):
prepared_sequence = tokenizer.encode_plus(
text,
add_special_tokens = True,
max_length = MAX_LENGHT,
padding = 'max_length',
return_attention_mask = True
)
return prepared_sequence<categorify>
|
final_sub = submission_xgb
|
Digit Recognizer
|
7,206,401 |
test_sentence = 'Is this jacksonville?'
test_sentence_encoded = prepare_sequence(test_sentence)
token_ids = test_sentence_encoded["input_ids"]
print(f'Test sentence: {test_sentence}')
print(f'Keys: {test_sentence_encoded.keys() }')
print(f'Tokens: {tokenizer.convert_ids_to_tokens(token_ids)[:12]}')
print(f'Token IDs: {token_ids[:12]}')
print(f'Segment IDs: {test_sentence_encoded["token_type_ids"][:12]}')
print(f'Mask IDs {test_sentence_encoded["attention_mask"][:12]}')
print(f'Input dimension: {len(token_ids)}' )<categorify>
|
save = pd.DataFrame({'ImageId':sample_submission.ImageId,'label':final_sub})
print(save.head(10))
save.to_csv('submission.csv',index=False )
|
Digit Recognizer
|
231,224 |
def map_example_to_dict(input_ids, attention_masks, token_type_ids, label):
mapped_example = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_masks,
}
return mapped_example, label
def encode_examples(texts_and_labels):
input_ids_list = []
token_type_ids_list = []
attention_mask_list = []
label_list = []
for text, label in texts_and_labels:
bert_input = prepare_sequence(text)
input_ids_list.append(bert_input['input_ids'])
token_type_ids_list.append(bert_input['token_type_ids'])
attention_mask_list.append(bert_input['attention_mask'])
label_list.append([label])
dataset = tf.data.Dataset.from_tensor_slices(
(input_ids_list, attention_mask_list, token_type_ids_list,
label_list)
)
dataset_mapped = dataset.map(map_example_to_dict)
return dataset_mapped<prepare_x_and_y>
|
train = pd.read_csv("../input/train.csv")
print(train.shape)
train.head()
|
Digit Recognizer
|
231,224 |
X = train_data["text"]
y = train_data["target"]<split>
|
z_train = Counter(train['label'])
z_train
|
Digit Recognizer
|
231,224 |
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.10,
random_state=1 )<count_values>
|
test = pd.read_csv("../input/test.csv")
print(test.shape)
test.head()
|
Digit Recognizer
|
231,224 |
n_training_examples = X_train.shape[0]
n_positive_training_examples = y_train.value_counts() [1]
n_negative_training_examples = y_train.value_counts() [0]
print(f'Number examples in training dataset: {n_training_examples}')
print(f'Number of positive examples in training dataset: {n_positive_training_examples}')
print(f'Number of negative examples in training dataset: {n_negative_training_examples}' )<define_variables>
|
x_train = (train.iloc[:,1:].values).astype('float32')
y_train = train.iloc[:,0].values.astype('int32')
x_test = test.values.astype('float32')
|
Digit Recognizer
|
231,224 |
train_dataset = list(zip(X_train, y_train))
val_dataset = list(zip(X_val, y_val))<categorify>
|
x_train = x_train/255.0
x_test = x_test/255.0
|
Digit Recognizer
|
231,224 |
ds_train_encoded = encode_examples(train_dataset ).shuffle(10000 ).batch(BATCH_SIZE)
ds_val_encoded = encode_examples(val_dataset ).batch(BATCH_SIZE )<load_pretrained>
|
batch_size = 64
num_classes = 10
epochs = 20
input_shape =(28, 28, 1 )
|
Digit Recognizer
|
231,224 |
def get_model() :
config = AutoConfig.from_pretrained(PRETRAINED_MODEL_NAME,
hidden_dropout_prob=0.2,
num_labels=LABELS_NUMBER)
model = TFBertForSequenceClassification.from_pretrained(PRETRAINED_MODEL_NAME,
config=config)
return model<choose_model_class>
|
y_train = keras.utils.to_categorical(y_train, num_classes)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state=42 )
|
Digit Recognizer
|
231,224 |
model = get_model()
optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer, loss=loss, metrics=[metric] )<train_model>
|
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',kernel_initializer='he_normal',input_shape=input_shape))
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',kernel_initializer='he_normal'))
model.add(MaxPool2D(( 2, 2)))
model.add(Dropout(0.20))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same',kernel_initializer='he_normal'))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same',kernel_initializer='he_normal'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128,(3, 3), activation='relu',padding='same',kernel_initializer='he_normal'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.RMSprop() ,
metrics=['accuracy'])
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.0001)
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=15,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False )
|
Digit Recognizer
|
231,224 |
weight_for_0 =(1 / n_negative_training_examples)*(n_training_examples)/2.0
weight_for_1 =(1 / n_positive_training_examples)*(n_training_examples)/2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print('Weight for class 0: {:.2f}'.format(weight_for_0))
print('Weight for class 1: {:.2f}'.format(weight_for_1))<train_model>
|
datagen.fit(X_train)
h = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 1, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction], )
|
Digit Recognizer
|
231,224 |
model.fit(ds_train_encoded, epochs=EPOCHS_NUMBER, validation_data=ds_val_encoded,
class_weight = class_weight )<predict_on_test>
|
final_loss, final_acc = model.evaluate(X_val, Y_val, verbose=0)
print("Final loss: {0:.6f}, final accuracy: {1:.6f}".format(final_loss, final_acc))
|
Digit Recognizer
|
231,224 |
val_predictions = model.predict(ds_val_encoded)
val_probabilities = softmax(val_predictions[0], axis=1)
y_val_predictions = np.argmax(val_probabilities, axis=1 ).flatten()<categorify>
|
layer_outputs = [layer.output for layer in model.layers[:8]]
activation_model = models.Model(input=model.input, output=layer_outputs)
activations = activation_model.predict(test_im.reshape(1,28,28,1))
first_layer_activation = activations[0]
plt.matshow(first_layer_activation[0, :, :, 4], cmap='viridis' )
|
Digit Recognizer
|
231,224 |
def encode_test_examples(texts):
input_ids_list = []
token_type_ids_list = []
attention_mask_list = []
for text in texts:
bert_input = prepare_sequence(text)
input_ids_list.append(bert_input['input_ids'])
token_type_ids_list.append(bert_input['token_type_ids'])
attention_mask_list.append(bert_input['attention_mask'])
dataset = tf.data.Dataset.from_tensor_slices(
(input_ids_list, attention_mask_list, token_type_ids_list)
)
dataset_mapped = dataset.map(map_test_example_to_dict)
return dataset_mapped
def map_test_example_to_dict(input_ids, attention_masks, token_type_ids):
mapped_example = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_masks,
}
return mapped_example<categorify>
|
model.layers[:-1]
|
Digit Recognizer
|
231,224 |
X_test = test_data["text"]
test_dataset = list(X_test)
ds_test_encoded = encode_test_examples(test_dataset ).batch(BATCH_SIZE )<predict_on_test>
|
layer_names = []
for layer in model.layers[:-1]:
layer_names.append(layer.name)
images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
if layer_name.startswith('conv'):
n_features = layer_activation.shape[-1]
size = layer_activation.shape[1]
n_cols = n_features // images_per_row
display_grid = np.zeros(( size * n_cols, images_per_row * size))
for col in range(n_cols):
for row in range(images_per_row):
channel_image = layer_activation[0,:, :, col * images_per_row + row]
channel_image -= channel_image.mean()
channel_image /= channel_image.std()
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255 ).astype('uint8')
display_grid[col * size :(col + 1)* size,
row * size :(row + 1)* size] = channel_image
scale = 1./ size
plt.figure(figsize=(scale * display_grid.shape[1],
scale * display_grid.shape[0]))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis' )
|
Digit Recognizer
|
231,224 |
test_predictions = model.predict(ds_test_encoded)
test_probabilities = softmax(test_predictions[0], axis=1)
y_test_predictions = np.argmax(test_probabilities, axis=1 ).flatten()<save_to_csv>
|
layer_names = []
for layer in model.layers[:-1]:
layer_names.append(layer.name)
images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
if layer_name.startswith('max'):
n_features = layer_activation.shape[-1]
size = layer_activation.shape[1]
n_cols = n_features // images_per_row
display_grid = np.zeros(( size * n_cols, images_per_row * size))
for col in range(n_cols):
for row in range(images_per_row):
channel_image = layer_activation[0,:, :, col * images_per_row + row]
channel_image -= channel_image.mean()
channel_image /= channel_image.std()
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255 ).astype('uint8')
display_grid[col * size :(col + 1)* size,
row * size :(row + 1)* size] = channel_image
scale = 1./ size
plt.figure(figsize=(scale * display_grid.shape[1],
scale * display_grid.shape[0]))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis' )
|
Digit Recognizer
|
231,224 |
final_submission = pd.DataFrame(data={"id":test_data["id"], "target":y_test_predictions})
final_submission.to_csv("submissionTweets.csv", index=False )<set_options>
|
layer_names = []
for layer in model.layers[:-1]:
layer_names.append(layer.name)
images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
if layer_name.startswith('drop'):
n_features = layer_activation.shape[-1]
size = layer_activation.shape[1]
n_cols = n_features // images_per_row
display_grid = np.zeros(( size * n_cols, images_per_row * size))
for col in range(n_cols):
for row in range(images_per_row):
channel_image = layer_activation[0,:, :, col * images_per_row + row]
channel_image -= channel_image.mean()
channel_image /= channel_image.std()
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255 ).astype('uint8')
display_grid[col * size :(col + 1)* size,
row * size :(row + 1)* size] = channel_image
scale = 1./ size
plt.figure(figsize=(scale * display_grid.shape[1],
scale * display_grid.shape[0]))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis' )
|
Digit Recognizer
|
231,224 |
warnings.filterwarnings('ignore')
<categorify>
|
Y_pred = model.predict(X_val)
Y_pred_classes = np.argmax(Y_pred, axis = 1)
Y_true_classes = np.argmax(Y_val, axis = 1 )
|
Digit Recognizer
|
231,224 |
def bert_encode(texts, tokenizer, max_len=512):
all_tokens = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[:max_len-2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence)
tokens += [0] * pad_len
pad_masks = [1] * len(input_sequence)+ [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
return np.array(all_tokens)
def build_model(transformer, max_len=512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
sequence_output = transformer(input_word_ids)[0]
cls_token = sequence_output[:, 0, :]
out = Dense(1, activation='sigmoid' )(cls_token)
model = Model(inputs=input_word_ids, outputs=out)
model.compile(Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
return model<load_from_csv>
|
target_names = ["Class {}".format(i)for i in range(num_classes)]
print(classification_report(Y_true_classes, Y_pred_classes, target_names=target_names))
|
Digit Recognizer
|
231,224 |
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv" )<choose_model_class>
|
predicted_classes = model.predict_classes(X_test)
submissions=pd.DataFrame({"ImageId": list(range(1,len(predicted_classes)+1)) ,
"Label": predicted_classes})
submissions.to_csv("asd.csv", index=False, header=True )
|
Digit Recognizer
|
231,224 |
<split><EOS>
|
model.save('my_model_1.h5')
json_string = model.to_json()
|
Digit Recognizer
|
10,869,544 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<compute_test_metric>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
X_train =(train.iloc[:,1:].values ).astype('float32')
y_train = train.iloc[:,0].values.astype('int32')
X_test = test.values.astype('float32')
X_train_scaled = X_train.reshape(X_train.shape[0], 28, 28,1)/ 255
X_test_scaled = X_test.reshape(X_test.shape[0], 28, 28,1)/ 255
y_train = to_categorical(y_train)
X_train_mean = train.iloc[:, 1:].sum().sum() /(train.shape[0] * train.shape[1])
X_test_mean = test.iloc[:, 1:].sum().sum() /(test.shape[0] * test.shape[1])
X_train_normalized =(( X_train - X_train_mean)/ X_train_mean ).reshape(train.shape[0], 28, 28,1)
X_test_normalized =(( X_test - X_test_mean)/ X_test_mean ).reshape(test.shape[0], 28, 28,1 )
|
Digit Recognizer
|
10,869,544 |
def metrics(y_true, y_pred):
print("
F1-score: ", round(f1_score(y_true, y_pred), 2))
print("Precision: ", round(precision_score(y_true, y_pred), 2))
print("Recall: ", round(recall_score(y_true, y_pred), 2))<train_model>
|
def format_predictions(model, test_data=X_test_scaled):
preds = model.predict(test_data)
preds_test = []
for i in preds:
preds_test.append(np.argmax(i))
return preds_test
early_stop = EarlyStopping(monitor='val_loss',
patience=20,
mode='min',
restore_best_weights=True)
def scheduler(epoch, lr):
if epoch < 20:
return lr
return lr *(0.9 **(( epoch - 19)// 10))
rate_scheduler = LearningRateScheduler(scheduler )
|
Digit Recognizer
|
10,869,544 |
start_time = time.time()
train_history = model.fit(X_train, y_train, epochs = 3, batch_size = 8)
end_time = time.time()
print("
=>Training time :", round(end_time - start_time, 1), 's' )<predict_on_test>
|
def conv_block1(In, std):
out = SeparableConv2D(16,(3, 3), kernel_initializer=TruncatedNormal(0, std, 1))(In)
out = BatchNormalization()(out)
out = SeparableConv2D(16,(1, 1), kernel_initializer=TruncatedNormal(0, std-1e-5, 1))(out)
return out
def conv_block2(In, std):
out = SeparableConv2D(16,(3, 1), kernel_initializer=TruncatedNormal(0, std, 1), padding='same' )(In)
out = SeparableConv2D(16,(1, 3), kernel_initializer=TruncatedNormal(0, std+1e-4, 1), padding='same' )(out)
out = MaxPooling2D(2 )(out)
return out
def conv_block3(In, std):
out = SeparableConv2D(16,(5, 3), kernel_initializer=TruncatedNormal(0, std, 1), padding='same' )(In)
out = BatchNormalization()(out)
out = SeparableConv2D(16,(3, 5), kernel_initializer=TruncatedNormal(0, std+1e-4, 1))(out)
out = MaxPooling2D(2 )(out)
return out
|
Digit Recognizer
|
10,869,544 |
start_time = time.time()
test_pred = model.predict(X_test, verbose=1 ).round().astype(int)
end_time = time.time()
print('\n=>Average Inference Time :', round((end_time - start_time) / len(test_pred) * 1000, 1), 'ms')
metrics(y_test, test_pred )<save_to_csv>
|
scaled_input = tf.keras.Input(shape=(28,28,1))
norm_in = BatchNormalization(name='norm_in' )(scaled_input)
out1 = []
for i in range(0, 8):
out1.append(conv_block1(norm_in, 1e3/10**(i)))
out2 = []
for i in range(0, 8, 2):
out2.append(Add()([out1[i], out1[i+1]]))
for i in range(0, len(out2)) :
out2[i] = conv_block2(out2[i], 1e4/10**(i))
out2[i] = conv_block3(out2[i], 10/10**(i // 2))
flats = Concatenate()([Flatten()(out)for out in out2])
d1 = Dense(1024,
kernel_initializer=TruncatedNormal(mean=10, stddev=2, seed=11),
bias_initializer=TruncatedNormal(mean=0, stddev=0.5, seed=11),
kernel_regularizer=l1(0.01)
)(flats)
d1 = BatchNormalization()(d1)
d2 = Dense(1024,
kernel_initializer=TruncatedNormal(mean=1, stddev=0.02, seed=11),
bias_initializer=TruncatedNormal(mean=0, stddev=0.5, seed=11),
kernel_regularizer=l1(0.01)
)(d1)
d2 = BatchNormalization()(d2)
final_prediction = Dense(10, activation='softmax' )(d2)
model = tf.keras.Model(inputs=[scaled_input], outputs=[final_prediction] )
|
Digit Recognizer
|
10,869,544 |
submission['target'] = model.predict(test_input, verbose=1 ).round().astype(int)
submission.to_csv('submission.csv', index=False )<install_modules>
|
model.count_params() / 1e6
|
Digit Recognizer
|
10,869,544 |
!pip install transformers<import_modules>
|
model.compile(optimizer=Adam(0.0075),
loss=CategoricalCrossentropy() ,
metrics=['accuracy'] )
|
Digit Recognizer
|
10,869,544 |
import torch
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, get_linear_schedule_with_warmup
from transformers import BertForSequenceClassification, AdamW, BertConfig
import torch.nn.functional as F
import pandas as pd
import numpy as np
import time
import datetime
import random, re
import seaborn as sns
import matplotlib.pyplot as plt<set_options>
|
def scheduler(epoch, lr):
if epoch < 20:
return lr
elif lr > 5e-5:
return lr *(0.95 **(epoch // 10 - 1))
return lr
hist = model.fit(X_train_scaled, y_train, epochs= 2000, batch_size=128,
callbacks=[
LearningRateScheduler(scheduler),
tfdocs.modeling.EpochDots() ,
ReduceLROnPlateau(
monitor='val_accuracy', factor=0.6, patience=20,
verbose=1, mode='auto', min_delta=1e-4, cooldown=0, min_lr=1e-15
)
],
verbose=True,
validation_split=0.33,
shuffle=True )
|
Digit Recognizer
|
10,869,544 |
if torch.cuda.is_available() :
device = torch.device("cuda")
print('There are {} GPU(s) available.'.format(torch.cuda.device_count()))
print('We will use the GPU: {}'.format(torch.cuda.get_device_name(0)))
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val )<load_from_csv>
|
submission = pd.DataFrame({
"ImageId": [i+1 for i in range(0, 28000)],
"Label": format_predictions(model)
})
submission.to_csv('s.csv', index=False )
|
Digit Recognizer
|
10,869,544 |
train = pd.read_csv('../input/nlp-getting-started/train.csv')
test = pd.read_csv('../input/nlp-getting-started/test.csv')
pd.set_option('display.max_colwidth', 150)
train.head()<train_model>
|
try :
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
print('No TPU being used!')
|
Digit Recognizer
|
10,869,544 |
print('Number of training sentences: {:,}\n'.format(train.shape[0]))
print('Number of test sentences: {:,}\n'.format(test.shape[0]))<categorify>
|
Digit Recognizer
|
|
10,869,544 |
def clean_text(text):
text = text.lower()
text = re.sub(r'[!]+','!',text)
text = re.sub(r'[?]+','?',text)
text = re.sub(r'[.]+','.',text)
text = re.sub(r"'","",text)
text = re.sub('\s+', '', text ).strip()
text = re.sub(r'&?',r'and', text)
text = re.sub(r"https?:\/\/t.co\/[A-Za-z0-9]+", "", text)
text = re.sub(r'[:"$%&\*+,-/:;<=>@\\^_`{|}~]+','',text)
return text
train.head()<load_pretrained>
|
Digit Recognizer
|
|
10,869,544 |
sentences = train.text.values
labels = train.target.values
sentences_test = test.text.values
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True )<categorify>
|
Digit Recognizer
|
|
10,869,544 |
print(' Original: ', sentences[0])
print('Tokenized: ', tokenizer.tokenize(sentences[0]))
print('Token IDs: ', tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentences[0])))
<define_variables>
|
Digit Recognizer
|
|
10,869,544 |
max_len = 0
for sent in sentences:
input_ids = tokenizer.encode(sent, add_special_tokens=True)
max_len = max(max_len, len(input_ids))
print('Max sentence length: ', max_len)
<categorify>
|
Digit Recognizer
|
|
10,427,138 |
input_ids = []
attention_masks = []
for sent in sentences:
encoded_dict = tokenizer.encode_plus(
sent,
add_special_tokens = True,
max_length = 64,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
print('Original sentence: ', sentences[0])
print('Token IDs list:', input_ids[0])
<split>
|
%matplotlib inline
|
Digit Recognizer
|
10,427,138 |
dataset = TensorDataset(input_ids, attention_masks, labels)
train_size = int(0.8 * len(dataset))
val_size = len(dataset)- train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
print('{:>5,} training samples'.format(train_size))
print('{:>5,} validation samples'.format(val_size))<load_pretrained>
|
train = pd.read_csv(".. /input/digit-recognizer/train.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
10,427,138 |
batch_size = 32
train_dataloader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
batch_size = batch_size
)
validation_dataloader = DataLoader(
val_dataset,
sampler = SequentialSampler(val_dataset),
batch_size = batch_size
)<load_pretrained>
|
x_train = np.array(x_train)
test = np.array(test )
|
Digit Recognizer
|
10,427,138 |
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased",
num_labels = 2,
output_attentions = False,
output_hidden_states = False,
)
model.cuda()<choose_model_class>
|
y_train = to_categorical(y_train,10 )
|
Digit Recognizer
|
10,427,138 |
optimizer = AdamW(model.parameters() ,
lr = 2e-5,
eps = 1e-8
)
epochs = 4
total_steps = len(train_dataloader)* epochs
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0,
num_training_steps = total_steps )<compute_test_metric>
|
x_train, x_val, y_train, y_val=train_test_split(x_train,y_train,test_size=0.1 )
|
Digit Recognizer
|
10,427,138 |
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1 ).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat)/ len(labels_flat)
def format_time(elapsed):
elapsed_rounded = int(round(( elapsed)))
return str(datetime.timedelta(seconds=elapsed_rounded))<train_model>
|
model = Sequential()
|
Digit Recognizer
|
10,427,138 |
training_stats = []
total_t0 = time.time()
for epoch_i in range(0, epochs):
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
t0 = time.time()
total_train_loss = 0
model.train()
for step, batch in enumerate(train_dataloader):
if step % 40 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {:>5,} of {:>5,}.Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
loss, logits = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_train_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters() , 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_train_loss / len(train_dataloader)
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(training_time))
print("")
print("Running Validation...")
t0 = time.time()
model.eval()
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
for batch in validation_dataloader:
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
with torch.no_grad() :
(loss, logits)= model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_eval_loss += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu' ).numpy()
total_eval_accuracy += flat_accuracy(logits, label_ids)
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
avg_val_loss = total_eval_loss / len(validation_dataloader)
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took {:} time".format(validation_time))
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid.Loss': avg_val_loss,
'Valid.Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
print("")
print("Training complete!")
print("Total training took {:}(h:mm:ss)".format(format_time(time.time() -total_t0)) )<categorify>
|
model.add(Input(shape=(28, 28, 1)))
model.add(Conv2D(filters=64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(MaxPooling2D(pool_size=(1,1)))
model.add(Conv2D(filters=64, kernel_size =(3,3),padding = 'Same',activation ='relu'))
model.add(MaxPooling2D(pool_size=(1,1)))
model.add(Conv2D(filters=64, kernel_size =(3,3),padding = 'Same',activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(128,(2,2),padding = 'Same', activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128,(2,2),padding = 'Same', activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
10,427,138 |
print('Number of test sentences: {:,}\n'.format(test.shape[0]))
sentences_test = test.text.values
input_ids = []
attention_masks = []
for sent in sentences_test:
encoded_dict = tokenizer.encode_plus(
sent,
add_special_tokens = True,
max_length = 64,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
batch_size = batch_size
prediction_data = TensorDataset(input_ids, attention_masks)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size )<predict_on_test>
|
model.compile(loss=keras.losses.categorical_crossentropy,\
optimizer = tf.keras.optimizers.Adam() ,\
metrics=['accuracy'] )
|
Digit Recognizer
|
10,427,138 |
print('Predicting labels for {:,} test sentences...'.format(len(input_ids)))
model.eval()
predictions = []
for batch in prediction_dataloader:
batch = tuple(t.to(device)for t in batch)
b_input_ids, b_input_mask = batch
with torch.no_grad() :
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask)
logits = outputs[0]
predictions.append(logits)
print('DONE!')
<concatenate>
|
model.fit(x_train,y_train, batch_size = 128, epochs = 30, validation_data=(x_val,y_val))
|
Digit Recognizer
|
10,427,138 |
all_logits = torch.cat(predictions, dim=0)
probs = F.softmax(all_logits, dim=1 ).cpu().numpy()
probs<prepare_output>
|
y_pred = model.predict(x_val)
y_pred_classes = np.argmax(y_pred,axis = 1)
y_pred_classes
y_true = np.argmax(y_val,axis = 1)
y_true
confusion_mtx = confusion_matrix(y_true, y_pred_classes)
confusion_mtx
|
Digit Recognizer
|
10,427,138 |
threshold = 0.5
preds = np.where(probs[:, 1] > threshold, 1, 0)
preds<count_values>
|
model.evaluate(x_val,y_val,verbose=0 )
|
Digit Recognizer
|
10,427,138 |
print("Number of tweets labeled as true disaster tweet: ", preds.sum() )<prepare_x_and_y>
|
train_image_generator = ImageDataGenerator()
|
Digit Recognizer
|
10,427,138 |
Y_test = preds<save_to_csv>
|
history = model.fit_generator(train_image_generator.flow(x_train,y_train, batch_size =32),epochs = 3,validation_data =(x_val,y_val))
|
Digit Recognizer
|
10,427,138 |
df_submission = pd.read_csv('../input/nlp-getting-started/sample_submission.csv', index_col=0).fillna('')
df_submission['target'] = Y_test
df_submission.to_csv('submission.csv')
!head submission.csv<set_options>
|
y_pred = model.predict(x_val)
y_pred_classes = np.argmax(y_pred,axis = 1)
y_pred_classes
|
Digit Recognizer
|
10,427,138 |
warnings.filterwarnings('ignore')
stop = set(stopwords.words('english'))
%matplotlib inline
plt.style.use('ggplot')
<load_from_csv>
|
y_true = np.argmax(y_val,axis = 1)
y_true
|
Digit Recognizer
|
10,427,138 |
train = pd.read_csv('../input/nlp-getting-started/train.csv')
test = pd.read_csv('../input/nlp-getting-started/test.csv')
submission = pd.read_csv('../input/nlp-getting-started/sample_submission.csv')
train_sent, test_sent, train_label = train.text.values, test.text.values, train.target.values<train_model>
|
confusion_mtx = confusion_matrix(y_true, y_pred_classes)
confusion_mtx
|
Digit Recognizer
|
10,427,138 |
word_tokenizer = Tokenizer()
word_tokenizer.fit_on_texts(train_sent)
vocab_length = len(word_tokenizer.word_index)+ 1<string_transform>
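# padded_sentences, length_long_sentence and test_sentences (used by the models and the
# final predict_classes call below) are not shown in these snippets; a minimal sketch of
# how they are typically derived from the fitted tokenizer above:
from tensorflow.keras.preprocessing.sequence import pad_sequences
length_long_sentence = max(len(s.split()) for s in train_sent)
padded_sentences = pad_sequences(word_tokenizer.texts_to_sequences(train_sent),
                                 maxlen=length_long_sentence, padding='post')
test_sentences = pad_sequences(word_tokenizer.texts_to_sequences(test_sent),
                               maxlen=length_long_sentence, padding='post')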
|
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
10,427,138 |
<feature_engineering><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False )
|
Digit Recognizer
|
10,403,729 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<split>
|
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPool2D
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
|
Digit Recognizer
|
10,403,729 |
X_train, X_test, y_train, y_test = train_test_split(padded_sentences,
train_label,
test_size=0.25,
random_state=42,
shuffle=True)
<train_model>
|
training_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
10,403,729 |
def training(model, model_name):
checkpoint = ModelCheckpoint(model_name + '.h5', monitor = 'val_loss', verbose = 1, save_best_only = True)
reduce_lr = ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, verbose = 1, patience = 5, min_lr = 0.001)
early_stop = EarlyStopping(monitor='val_loss', patience=1)
start_time = time.time()
history = model.fit(X_train, y_train,
epochs = 100,
batch_size = 32,
validation_data = [X_test, y_test],
verbose = 1,
callbacks = [reduce_lr, checkpoint, early_stop])
end_time = time.time()
print("
=>Training time :", round(end_time - start_time, 1), 's')
model.load_weights(model_name + '.h5')
start_time = time.time()
preds = model.predict_classes(X_test)
end_time = time.time()
print('
=>Average Inference Time :', round(( end_time - start_time)/ len(X_test)* 1000, 1), 'ms')
print('
=>Model Size :', round(os.stat(model_name + '.h5' ).st_size /(1024 ** 2), 1), 'MB')
metrics(y_test, preds)
plot(history, [['loss', 'val_loss'],['accuracy', 'val_accuracy']] )<choose_model_class>
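# plot() is not defined in these snippets; a minimal sketch that draws each group of
# history curves (e.g. [['loss', 'val_loss'], ['accuracy', 'val_accuracy']]) side by side:
import matplotlib.pyplot as plt
def plot(history, groups):
    plt.figure(figsize=(6 * len(groups), 4))
    for i, keys in enumerate(groups):
        plt.subplot(1, len(groups), i + 1)
        for key in keys:
            plt.plot(history.history[key], label=key)
        plt.legend()
    plt.show()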
|
X_train = training_data.drop('label', axis=1 ).values
y_train = training_data[['label']].values
X_test = test_data.values
test_data.shape
|
Digit Recognizer
|
10,403,729 |
def CNN() :
model = Sequential()
model.add(Embedding(input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=length_long_sentence))
model.add(Conv1D(filters=32, kernel_size=8, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return model
model = CNN()
model.summary()<train_model>
|
batch_size = 128
num_classes = 10
epochs = 12
img_rows, img_cols = 28, 28
input_shape =(1, img_rows, img_cols )
|
Digit Recognizer
|
10,403,729 |
training(model, 'model_cnn' )<choose_model_class>
|
X_train = X_train.reshape(-1,28,28,1)
X_test = X_test.reshape(-1,28,28,1 )
|
Digit Recognizer
|
10,403,729 |
def RNN() :
model = Sequential()
model.add(Embedding(input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=length_long_sentence))
model.add(Bidirectional(SimpleRNN(length_long_sentence, return_sequences = True, recurrent_dropout=0.2)))
model.add(GlobalMaxPool1D())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(length_long_sentence, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(length_long_sentence, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return model
model = RNN()
model.summary()<train_model>
|
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('x_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples' )
|
Digit Recognizer
|
10,403,729 |
training(model, 'model_rnn' )<choose_model_class>
|
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, stratify=y_train,
test_size=0.15, random_state=42 )
|
Digit Recognizer
|
10,403,729 |
def BiGRU() :
model = Sequential()
model.add(Embedding(input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=length_long_sentence))
model.add(Bidirectional(GRU(length_long_sentence, return_sequences = True, recurrent_dropout=0.2)))
model.add(GlobalMaxPool1D())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(length_long_sentence, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(length_long_sentence, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return model
model = BiGRU()
model.summary()<train_model>
|
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_train.shape, y_val.shape
|
Digit Recognizer
|
10,403,729 |
training(model, 'model_bigru' )<choose_model_class>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
10,403,729 |
def BiLSTM() :
model = Sequential()
model.add(Embedding(input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=length_long_sentence))
model.add(Bidirectional(LSTM(length_long_sentence, return_sequences = True, recurrent_dropout=0.2)))
model.add(GlobalMaxPool1D())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(length_long_sentence, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(length_long_sentence, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return model
model = BiLSTM()
model.summary()<train_model>
|
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'] )
|
Digit Recognizer
|
10,403,729 |
training(model, 'model_bilstm' )<save_to_csv>
|
model.fit(X_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(X_val, y_val))
|
Digit Recognizer
|
10,403,729 |
submission.target = model.predict_classes(test_sentences)
submission.to_csv("submission.csv", index=False )<import_modules>
|
score = model.evaluate(X_val, y_val, verbose=0)
score
|
Digit Recognizer
|
10,403,729 |
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt<load_from_csv>
|
results = model.predict(X_test)
results = np.argmax(results, axis = 1)
results = pd.Series(results, name="Label" )
|
Digit Recognizer
|
10,403,729 |
<count_values><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"), results],axis = 1)
submission.to_csv("cnn_mnist.csv",index=False )
|
Digit Recognizer
|
10,394,678 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<count_values>
|
!pip install -q efficientnet_pytorch
|
Digit Recognizer
|
10,394,678 |
df.keyword.value_counts(dropna=False )<count_values>
|
import time
import random
import datetime
import os
import numpy as np
import pandas as pd
from sklearn import model_selection
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import efficientnet_pytorch
import cv2
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import matplotlib.pyplot as plt
|
Digit Recognizer
|
10,394,678 |
df.location.value_counts(dropna=False )<feature_engineering>
|
SEED = 42
|
Digit Recognizer
|
10,394,678 |
glove = {}
with open(".. /input/glove6b/glove.6B.100d.txt")as f:
for line in f:
glove[line.split() [0]] = line.split() [1:]<count_values>
|
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
seed_everything(SEED )
|
Digit Recognizer
|
10,394,678 |
word_counts = df.text.str.lower().str.split().explode().value_counts()
word_counts.cumsum() [10000] / word_counts.sum()<string_transform>
|
class DataLoaderConfig:
batch_size = 64
num_workers = 8
class TrainConfig:
criterion = nn.CrossEntropyLoss
n_epochs = 10
lr = 0.001
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau
scheduler_params = dict(
mode='min',
factor=0.5,
patience=1,
verbose=False,
threshold=0.0001,
threshold_mode='abs',
cooldown=0,
min_lr=1e-8,
eps=1e-08
)
DEVICE = torch.device('cuda')if torch.cuda.is_available() else torch.device('cpu' )
|
Digit Recognizer
|
10,394,678 |
NUM_WORDS = 10000
MAXLEN = 30
texts = df.text.str.lower()
tokenizer = keras.preprocessing.text.Tokenizer(num_words=NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
data = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=MAXLEN )<define_variables>
|
df = pd.read_csv('../input/digit-recognizer/train.csv')
print(df.shape)
df.head()
|
Digit Recognizer
|
10,394,678 |
labels = df.target<prepare_x_and_y>
|
y = df['label'].values
X = df.drop(['label'], axis=1 ).values
|
Digit Recognizer
|
10,394,678 |
x_train = data
y_train = labels<define_variables>
|
X_train, X_valid, y_train, y_valid = model_selection.train_test_split(X, y, test_size=0.2)
X_train.shape, X_valid.shape, y_train.shape, y_valid.shape
|
Digit Recognizer
|
10,394,678 |
EMBEDDING_DIM = len(glove["the"])
embedding_matrix = np.zeros(( NUM_WORDS, EMBEDDING_DIM))
for word, i in word_index.items() :
if i < NUM_WORDS:
embedding_vector = glove.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector<choose_model_class>
|
class DatasetRetriever(Dataset):
def __init__(self, X, y, transforms=None):
super().__init__()
self.X = X.reshape(-1, 28, 28 ).astype(np.float32)
self.y = y
self.transforms = transforms
def __getitem__(self, index):
image, target = self.X[index], self.y[index]
image = np.stack([image] * 3, axis=-1)
image /= 255.
if self.transforms:
image = self.transforms(image=image)['image']
return image, torch.tensor(target, dtype=torch.long)
def __len__(self):
return self.y.shape[0]
|
Digit Recognizer
|
10,394,678 |
model = keras.Sequential([
layers.Embedding(NUM_WORDS, EMBEDDING_DIM, input_length=MAXLEN,
name='embedding'),
layers.Bidirectional(layers.GRU(32,
dropout=.2,
recurrent_dropout=.2,)) ,
layers.Dense(1, activation='sigmoid'),
])
model.get_layer('embedding' ).set_weights([embedding_matrix])
model.get_layer('embedding' ).trainable = False
optimizer = keras.optimizers.RMSprop(lr=1e-2)
model.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['acc'])
model.summary()<train_model>
|
def get_train_transforms() :
return A.Compose(
[
A.Rotate(limit=10, border_mode=cv2.BORDER_REPLICATE, p=0.5),
A.Cutout(num_holes=8, max_h_size=2, max_w_size=2, fill_value=0, p=0.5),
A.Cutout(num_holes=8, max_h_size=1, max_w_size=1, fill_value=1, p=0.5),
A.Resize(32, 32, p=1.) ,
ToTensorV2(p=1.0),
],
p=1.0)
def get_valid_transforms() :
return A.Compose(
[
A.Resize(32, 32, p=1.) ,
ToTensorV2(p=1.0),
],
p=1.0
)
|
Digit Recognizer
|
10,394,678 |
early_stopping = keras.callbacks.EarlyStopping(
patience=10,
restore_best_weights=True,
)
lr_decay = keras.callbacks.ReduceLROnPlateau()
history = model.fit(
x_train, y_train,
epochs=50,
batch_size=32,
validation_split=.2,
callbacks=[early_stopping, lr_decay]
)<load_from_csv>
|
train_dataset = DatasetRetriever(
X = X_train,
y = y_train,
transforms=get_train_transforms() ,
)
valid_dataset = DatasetRetriever(
X = X_valid,
y = y_valid,
transforms=get_valid_transforms() ,
)
|
Digit Recognizer
|
10,394,678 |
test_df = pd.read_csv("../input/nlp-getting-started/test.csv",
index_col=0)
def preprocess(texts, labels=None, tokenizer=tokenizer):
NUM_WORDS = 10000
MAXLEN = 30
texts = pd.Series(texts ).str.lower()
sequences = tokenizer.texts_to_sequences(texts)
data = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=MAXLEN)
return data, np.array(labels)
x_test,_ = preprocess(test_df.text)
preds = model.predict(x_test)
preds<save_to_csv>
|
train_loader = DataLoader(
train_dataset,
batch_size=DataLoaderConfig.batch_size,
shuffle=True,
num_workers=DataLoaderConfig.num_workers,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=DataLoaderConfig.batch_size,
shuffle=False,
num_workers=DataLoaderConfig.num_workers,
)
|
Digit Recognizer
|
10,394,678 |
answer_df = pd.read_csv(
'../input/nlp-getting-started/sample_submission.csv',
index_col=0
)
answer_df['target'] =(preds > 0.5 ).astype('uint8')
answer_df.to_csv('submission.csv')
!head submission.csv<install_modules>
|
class LossMeter:
def __init__(self):
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class AccMeter:
def __init__(self):
self.true_count = 0
self.all_count = 0
self.avg = 0
def update(self, y_true, y_pred):
y_true = y_true.cpu().numpy().astype(int)
y_pred = y_pred.cpu().numpy().argmax(axis=1 ).astype(int)
self.true_count +=(y_true == y_pred ).sum()
self.all_count += y_true.shape[0]
self.avg = self.true_count / self.all_count
|
Digit Recognizer
|
10,394,678 |
!pip install -U tensorflow-text==2.3<install_modules>
|
class Fitter:
def __init__(
self, model, device, criterion, n_epochs,
lr, sheduler=None, scheduler_params=None
):
self.epoch = 0
self.n_epochs = n_epochs
self.base_dir = './'
self.log_path = f'{self.base_dir}/log.txt'
self.best_summary_loss = np.inf
self.model = model
self.device = device
self.optimizer = torch.optim.Adam(self.model.parameters() , lr=lr)
if sheduler:
self.scheduler = sheduler(self.optimizer, **scheduler_params)
self.criterion = criterion().to(self.device)
self.log(f'Fitter prepared.Device is {self.device}')
def fit(self, train_loader, valid_loader):
for e in range(self.n_epochs):
current_lr = self.optimizer.param_groups[0]['lr']
self.log(f'\n{datetime.datetime.utcnow().isoformat()}\nLR: {current_lr}')
t = int(time.time())
summary_loss, final_scores = self.train_one_epoch(train_loader)
self.log(
f'[RESULT]: Train.Epoch: {self.epoch}, ' + \
f'summary_loss: {summary_loss.avg:.5f}, ' + \
f'final_score: {final_scores.avg:.5f}, ' + \
f'time: {int(time.time())- t} s'
)
t = int(time.time())
summary_loss, final_scores = self.validation(valid_loader)
self.log(
f'[RESULT]: Valid.Epoch: {self.epoch}, ' + \
f'summary_loss: {summary_loss.avg:.5f}, ' + \
f'final_score: {final_scores.avg:.5f}, ' + \
f'time: {int(time.time())- t} s'
)
f_best = 0
if summary_loss.avg < self.best_summary_loss:
self.best_summary_loss = summary_loss.avg
f_best = 1
self.scheduler.step(metrics=summary_loss.avg)
self.save(f'{self.base_dir}/last-checkpoint.bin')
if f_best:
self.save(f'{self.base_dir}/best-checkpoint.bin')
print('New best checkpoint')
self.epoch += 1
def validation(self, val_loader):
self.model.eval()
summary_loss = LossMeter()
final_scores = AccMeter()
t = int(time.time())
for step,(images, targets)in enumerate(val_loader):
print(
f'Valid Step {step}/{len(val_loader)}, ' + \
f'summary_loss: {summary_loss.avg:.5f}, ' + \
f'final_score: {final_scores.avg:.5f}, ' + \
f'time: {int(time.time())- t} s', end='\r'
)
with torch.no_grad() :
targets = targets.to(self.device)
images = images.to(self.device)
batch_size = images.shape[0]
outputs = self.model(images)
loss = self.criterion(outputs, targets)
final_scores.update(targets, outputs)
summary_loss.update(loss.detach().item() , batch_size)
return summary_loss, final_scores
def train_one_epoch(self, train_loader):
self.model.train()
summary_loss = LossMeter()
final_scores = AccMeter()
t = int(time.time())
for step,(images, targets)in enumerate(train_loader):
print(
f'Train Step {step}/{len(train_loader)}, ' + \
f'summary_loss: {summary_loss.avg:.5f}, ' + \
f'final_score: {final_scores.avg:.5f}, ' + \
f'time: {int(time.time())- t} s', end='\r'
)
targets = targets.to(self.device)
images = images.to(self.device)
batch_size = images.shape[0]
self.optimizer.zero_grad()
outputs = self.model(images)
loss = self.criterion(outputs, targets)
loss.backward()
final_scores.update(targets, outputs.detach())
summary_loss.update(loss.detach().item() , batch_size)
self.optimizer.step()
return summary_loss, final_scores
def save(self, path):
self.model.eval()
torch.save({
'model_state_dict': self.model.state_dict() ,
'optimizer_state_dict': self.optimizer.state_dict() ,
'scheduler_state_dict': self.scheduler.state_dict() ,
'best_summary_loss': self.best_summary_loss,
'epoch': self.epoch,
}, path)
def load(self, path):
checkpoint = torch.load(path)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
self.best_summary_loss = checkpoint['best_summary_loss']
self.epoch = checkpoint['epoch'] + 1
def log(self, message):
print(message)
with open(self.log_path, 'a+')as logger:
logger.write(f'{message}\n')
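# A minimal usage sketch for the Fitter above; the EfficientNet backbone and its
# num_classes argument are assumptions, not taken from these snippets:
net = efficientnet_pytorch.EfficientNet.from_pretrained('efficientnet-b0', num_classes=10)
net = net.to(DEVICE)
fitter = Fitter(
    model=net,
    device=DEVICE,
    criterion=TrainConfig.criterion,
    n_epochs=TrainConfig.n_epochs,
    lr=TrainConfig.lr,
    sheduler=TrainConfig.scheduler,
    scheduler_params=TrainConfig.scheduler_params,
)
fitter.fit(train_loader, valid_loader)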
|
Digit Recognizer
|