kernel_id
int64 24.2k
23.3M
| prompt
stringlengths 8
1.85M
| completetion
stringlengths 1
182k
| comp_name
stringlengths 5
57
|
---|---|---|---|
11,475,252 |
sub = pd.merge(
sample_submission,
subm,
how="left",
left_on='ID',
right_on='image_id',
)<count_missing_values>
|
def model() :
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape =(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Conv2D(128, kernel_size = 4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
adam = tf.keras.optimizers.Adam(
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
amsgrad=False)
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
early = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)
checkpoint_path = 'training_1/cp.ckpt'
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
print(model.summary())
history = model.fit(xtrain, ytrain, epochs=100, callbacks=[cp_callback, early], validation_data=(xval, yval))
prediction = model.predict(test)
prediction = np.argmax(prediction, axis=1)
return history, prediction
|
Digit Recognizer
|
11,475,252 |
def isNaN(num):
return num != num<feature_engineering>
|
history, prediction = model()
|
Digit Recognizer
|
11,475,252 |
for i, row in sub.iterrows() :
if isNaN(row['pred']): continue
sub.PredictionString.loc[i] = row['pred']<save_to_csv>
|
data = {"ImageId": image_id, "Label":prediction}
results = pd.DataFrame(data)
results.to_csv("result.csv",index=False )
|
Digit Recognizer
|
11,475,252 |
sub.to_csv('submission_1.csv', index=False )<load_from_csv>
|
from keras.preprocessing.image import ImageDataGenerator
|
Digit Recognizer
|
11,475,252 |
cell_df = pd.read_csv('cell_df.csv')
cell_df.head()
cell_df['cls'] = ''<feature_engineering>
|
datagen_train = datagen_valid = ImageDataGenerator(
featurewise_center = False,
samplewise_center = False,
featurewise_std_normalization = False,
samplewise_std_normalization = False,
zca_whitening = False,
horizontal_flip = False,
vertical_flip = False,
fill_mode = 'nearest',
rotation_range = 10,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1)
datagen_train.fit(xtrain)
train_gen = datagen_train.flow(xtrain, ytrain, batch_size=64)
datagen_valid.fit(xval)
valid_gen = datagen_valid.flow(xval, yval, batch_size=64 )
|
Digit Recognizer
|
11,475,252 |
threshold = 0.0
for i in range(preds.shape[0]):
p = torch.nonzero(preds[i] > threshold ).squeeze().numpy().tolist()
if type(p)!= list: p = [p]
if len(p)== 0: cls = [(preds[i].argmax().item() , preds[i].max().item())]
else: cls = [(x, preds[i][x].item())for x in p]
cell_df['cls'].loc[i] = cls<categorify>
|
model = Sequential([
Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same', input_shape =(28,28,1)) ,
Conv2D(32, kernel_size=(3, 3), activation='relu'),
BatchNormalization() ,
MaxPool2D(pool_size=(2, 2)) ,
Dropout(0.2),
Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'),
Conv2D(64, kernel_size=(3, 3), activation='relu'),
BatchNormalization() ,
MaxPool2D(pool_size=(2, 2)) ,
Dropout(0.2),
Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'),
Conv2D(128, kernel_size=(3, 3), activation='relu'),
BatchNormalization() ,
MaxPool2D(pool_size=(2, 2)) ,
Dropout(0.2),
Flatten() ,
Dense(512, activation='relu'),
Dropout(0.5),
Dense(10, activation = "softmax")
])
adam = tf.keras.optimizers.Adam(
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
amsgrad=False)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
early = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=50)
checkpoint_path = 'training_1/cp.ckpt'
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
print(model.summary())
history = model.fit(( train_gen), epochs=100, callbacks=[cp_callback, early], validation_data=(valid_gen))
prediction = model.predict(test)
prediction = np.argmax(prediction, axis=1 )
|
Digit Recognizer
|
11,475,252 |
<feature_engineering><EOS>
|
data = {"ImageId": image_id, "Label":prediction}
results = pd.DataFrame(data)
results.to_csv("result_data_generator.csv",index=False )
|
Digit Recognizer
|
11,486,725 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<groupby>
|
mnist_test = pd.read_csv(".. /input/mnist-in-csv/mnist_test.csv")
mnist_train = pd.read_csv(".. /input/mnist-in-csv/mnist_train.csv" )
|
Digit Recognizer
|
11,486,725 |
subm = cell_df.groupby(['image_id'])['pred'].apply(lambda x: ' '.join(x)).reset_index()
subm.head()<load_from_csv>
|
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv")
train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
|
Digit Recognizer
|
11,486,725 |
sample_submission = pd.read_csv('.. /input/hpa-single-cell-image-classification/sample_submission.csv')
sample_submission.head()<merge>
|
test['dataset'] = 'test'
|
Digit Recognizer
|
11,486,725 |
sub = pd.merge(
sample_submission,
subm,
how="left",
left_on='ID',
right_on='image_id',
)
sub.head()<feature_engineering>
|
train['dataset'] = 'train'
|
Digit Recognizer
|
11,486,725 |
def isNaN(num):
return num != num
for i, row in sub.iterrows() :
if isNaN(row['pred']): continue
sub.PredictionString.loc[i] = row['pred']<save_to_csv>
|
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
|
Digit Recognizer
|
11,486,725 |
sub.to_csv('submission.csv', index=False )<save_to_csv>
|
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True)
labels = mnist['label'].values
mnist.drop('label', axis=1, inplace=True)
mnist.columns = cols
|
Digit Recognizer
|
11,486,725 |
sub.to_csv('submission.csv', index=False )<load_from_csv>
|
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index
dataset_from = dataset.sort_values(by=list(mnist.columns)) ['dataset'].values
original_idx = dataset.sort_values(by=list(mnist.columns)) ['index'].values
|
Digit Recognizer
|
11,486,725 |
data_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
data_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )<count_missing_values>
|
for i in range(len(idx_mnist)) :
if dataset_from[i] == 'test':
sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
|
Digit Recognizer
|
11,486,725 |
<create_dataframe><EOS>
|
sample_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,427,431 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<drop_column>
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
|
Digit Recognizer
|
11,427,431 |
true_labels = data_train.label
data_train = data_train.drop('label', axis = 1 )<categorify>
|
train = pd.read_csv('.. /input/digit-recognizer/train.csv')
test = pd.read_csv('.. /input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
11,427,431 |
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
data_train_embedded = tsne.fit_transform(sample.drop('label', axis = 1))<train_model>
|
Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1 )
|
Digit Recognizer
|
11,427,431 |
X_train, X_holdout, y_train, y_holdout = train_test_split(data_train_pd.drop('label', axis = 1), data_train_pd.label,
test_size = 0.25, random_state=0)
knn = KNeighborsClassifier(n_neighbors=10, n_jobs=-1)
knn.fit(X_train, y_train)
<train_model>
|
X_train = X_train / 255.0
test = test / 255.0
|
Digit Recognizer
|
11,427,431 |
X_train, X_holdout, y_train, y_holdout = train_test_split(data_train_pd.drop('label', axis = 1), data_train_pd.label,
test_size = 0.25, random_state=0)
bnbclf = BernoulliNB()
bnbclf.fit(X_train, y_train )<compute_test_metric>
|
Y_train = np_utils.to_categorical(Y_train, num_classes = 10 )
|
Digit Recognizer
|
11,427,431 |
print("Accuracy score: {:.2f}".format(bnbclf.score(X_holdout, y_holdout)))
print("Cross-entropy loss: {:.2f}".format(log_loss(np.array(y_holdout), bnbclf.predict_proba(X_holdout))))<train_on_grid>
|
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1 )
|
Digit Recognizer
|
11,427,431 |
bnb_params = {'alpha': np.arange(0.01, 0.1, 0.05),
'binarize' : np.arange(0, 0.5, 0.2),
'fit_prior': [True, False]
}
bnbcv = GridSearchCV(bnbclf, param_grid = bnb_params, cv = 3 )<train_model>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
11,427,431 |
bnbcv.fit(X_train, y_train)
bnb_best = bnbcv.best_estimator_<find_best_params>
|
model.compile(optimizer = 'Adam' , loss = "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
11,427,431 |
bnbcv.best_params_<compute_test_metric>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )
|
Digit Recognizer
|
11,427,431 |
print("Accuracy score: {:.2f}".format(bnb_best.score(X_holdout, y_holdout)))
print("Cross-entropy loss: {:.2f}".format(log_loss(np.array(y_holdout), bnb_best.predict_proba(X_holdout))))<choose_model_class>
|
epochs = 20
batch_size = 86
|
Digit Recognizer
|
11,427,431 |
model = Sequential()
model.add(Convolution2D(32,(3, 3), activation='relu', input_shape=(28,28,1)))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer="zeros", gamma_initializer="ones",))
model.add(Convolution2D(32,(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer="zeros", gamma_initializer="ones",))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))<feature_engineering>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
|
Digit Recognizer
|
11,427,431 |
data_train = data_train / 255
data_test = data_test / 255<prepare_x_and_y>
|
datagen.fit(X_train )
|
Digit Recognizer
|
11,427,431 |
y = np.array(pd.get_dummies(true_labels))<split>
|
from keras.callbacks import ReduceLROnPlateau
|
Digit Recognizer
|
11,427,431 |
X_train, X_holdout, y_train, y_holdout = train_test_split(data_train, y,
test_size = 0.25, random_state=17 )<choose_model_class>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )
|
Digit Recognizer
|
11,427,431 |
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
patience=5,
min_lr=0.000001,
verbose=1 )<train_model>
|
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction] )
|
Digit Recognizer
|
11,427,431 |
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
result = model.fit(X_train, y_train, batch_size=32, epochs=10, verbose=1, validation_data=(X_holdout, y_holdout), callbacks = [reduce_lr] )<choose_model_class>
|
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
11,427,431 |
layer_names = [layer.name for layer in model.layers]
layer_outputs = [layer.output for layer in model.layers]
layer_outputs = [layer_outputs[0], layer_outputs[2]]
feature_map_model = Model(model.input, layer_outputs)
im = X_train[99:100,:]
feature_maps = feature_map_model.predict(im )<train_model>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission.csv",index=False )
|
Digit Recognizer
|
10,925,075 |
augumentator = tf.keras.preprocessing.image.ImageDataGenerator(
rotation_range=15,
width_shift_range=0.15,
shear_range=0.1,
zoom_range=0.1,
validation_split=0.0,
horizontal_flip=False,
vertical_flip=False)
augumentator.fit(X_train )<train_model>
|
%matplotlib inline
|
Digit Recognizer
|
10,925,075 |
history = model.fit(augumentator.flow(X_train, y_train, batch_size = 32), epochs = 10,
validation_data =(X_holdout, y_holdout), verbose = 1, callbacks = [reduce_lr] )<load_pretrained>
|
train_df = pd.read_csv('.. /input/digit-recognizer/train.csv')
test_df = pd.read_csv('.. /input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
10,925,075 |
mnist = tf.keras.datasets.mnist
(X_train_mnist, y_train_mnist),(X_val_mnist, y_val_mnist)= mnist.load_data()<categorify>
|
X = train_df.drop('label',axis=1)
y = train_df['label']
X = X.values.reshape(-1,28,28,1)
X = X/255
y = to_categorical(y)
print(plt.imshow(X[2][:,:,0]))
print(str(y[1]))
|
Digit Recognizer
|
10,925,075 |
y_train_mnist = np.array(pd.get_dummies(pd.Series(y_train_mnist)))
y_holdout_mnist = np.array(pd.get_dummies(pd.Series(y_val_mnist)) )<define_variables>
|
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=123 )
|
Digit Recognizer
|
10,925,075 |
X_train_mnist = X_train_mnist.reshape(-1, 28, 28, 1)
X_holdout_mnist = X_val_mnist.reshape(-1, 28, 28, 1)
X_train_mnist = X_train_mnist / 255
X_holdout_mnist = X_holdout_mnist /255<concatenate>
|
datagen = ImageDataGenerator(zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1, rotation_range = 10 )
|
Digit Recognizer
|
10,925,075 |
X_train_ext = np.concatenate(( X_train, X_train_mnist), axis = 0)
X_holdout_ext = np.concatenate(( X_holdout, X_holdout_mnist), axis = 0)
y_train_ext = np.concatenate(( y_train, y_train_mnist), axis = 0)
y_holdout_ext = np.concatenate(( y_holdout, y_holdout_mnist), axis = 0 )<train_model>
|
model = Sequential()
|
Digit Recognizer
|
10,925,075 |
model.fit(X_train_ext, y_train_ext, batch_size=32, epochs=20, verbose=1, validation_data=(X_holdout, y_holdout), callbacks = [reduce_lr] )<save_to_csv>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation = 'relu', input_shape =(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 32, kernel_size =(5, 5), activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides =(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3, 3), activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(3, 3), activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(5, 5), activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides =(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(1024, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation = 'softmax'))
|
Digit Recognizer
|
10,925,075 |
predictions = model.predict(data_test ).argmax(axis = 1)
predictions
submission = pd.DataFrame({'ImageId':np.arange(1, len(predictions)+1), 'Label':predictions})
submission.to_csv('submission.csv', index=False )<import_modules>
|
model.compile(optimizer='adam',metrics=['accuracy'],loss='categorical_crossentropy' )
|
Digit Recognizer
|
10,925,075 |
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras import utils
from tensorflow.keras.preprocessing import image
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
import tensorflow as tf
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt<load_from_csv>
|
reduction_lr = ReduceLROnPlateau(monitor='val_accuracy',patience=2, verbose=1, factor=0.2, min_lr=0.00001 )
|
Digit Recognizer
|
10,925,075 |
data_train = np.loadtxt('/kaggle/input/digit-recognizer/train.csv', skiprows = 1, delimiter= ',')
data_train[0:5]<train_model>
|
hist = model.fit_generator(datagen.flow(X_train,y_train,batch_size=32),epochs=20,validation_data =(X_test,y_test),callbacks=[reduction_lr] )
|
Digit Recognizer
|
10,925,075 |
x_train = data_train[:, 1:]
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
input_shape =(28, 28, 1 )<feature_engineering>
|
final_loss, final_acc = model.evaluate(X_test, y_test, verbose=0)
print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
|
Digit Recognizer
|
10,925,075 |
x_train = x_train / 255.0<prepare_x_and_y>
|
test_df = test_df.values.reshape(-1, 28, 28, 1)/ 255
y_pred = model.predict(test_df, batch_size = 64)
y_pred = np.argmax(y_pred,axis = 1)
y_pred = pd.Series(y_pred,name="Label")
y_pred
|
Digit Recognizer
|
10,925,075 |
<categorify><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),y_pred],axis = 1)
submission.to_csv("submission.csv",index=False )
|
Digit Recognizer
|
11,722,614 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<split>
|
mnist_test = pd.read_csv(".. /input/mnist-digit-recognizer/mnist_test.csv")
mnist_train = pd.read_csv(".. /input/mnist-digit-recognizer/mnist_train.csv")
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv")
train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
|
Digit Recognizer
|
11,722,614 |
random_seed = 2
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size = 0.1, random_state=random_seed )<choose_model_class>
|
test['dataset'] = 'test'
|
Digit Recognizer
|
11,722,614 |
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.10,
width_shift_range=0.1,
height_shift_range=0.1)
<choose_model_class>
|
train['dataset'] = 'train'
|
Digit Recognizer
|
11,722,614 |
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"] )<choose_model_class>
|
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
|
Digit Recognizer
|
11,722,614 |
checkpoint = ModelCheckpoint('mnist-cnn.h5',
monitor='val_acc',
save_best_only=True,
verbose=1 )<choose_model_class>
|
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True)
labels = mnist['label'].values
mnist.drop('label', axis=1, inplace=True)
mnist.columns = cols
|
Digit Recognizer
|
11,722,614 |
learn_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
<train_model>
|
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index
dataset_from = dataset.sort_values(by=list(mnist.columns)) ['dataset'].values
original_idx = dataset.sort_values(by=list(mnist.columns)) ['index'].values
|
Digit Recognizer
|
11,722,614 |
batch_size=96
history = model.fit(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs=30,
validation_data=(X_val, Y_val),
steps_per_epoch=X_train.shape[0] // batch_size,
verbose=1,
callbacks=[checkpoint, learn_rate_reduction] )<load_from_csv>
|
for i in range(len(idx_mnist)) :
if dataset_from[i] == 'test':
sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
|
Digit Recognizer
|
11,722,614 |
data_test = np.loadtxt('/kaggle/input/digit-recognizer/test.csv', skiprows = 1, delimiter=',')
x_test = data_test.reshape(data_test.shape[0], 28, 28, 1)
x_test /= 255.0<predict_on_test>
|
sample_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,691,016 |
predict = model.predict(x_test)
predict = np.argmax(predict, axis=1 )<concatenate>
|
train = pd.read_csv(".. /input/digit-recognizer/train.csv")
print(train.shape)
train.head()
|
Digit Recognizer
|
11,691,016 |
out = np.column_stack(( range(1, predict.shape[0]+1), predict))<save_to_csv>
|
test= pd.read_csv(".. /input/digit-recognizer/test.csv")
print(test.shape)
test.head()
|
Digit Recognizer
|
11,691,016 |
np.savetxt('submission.csv', out, header="ImageId,Label",
comments="", fmt="%d,%d" )<set_options>
|
Y_train = train["label"]
X_train = train.drop(columns = ["label"],axis = 1 )
|
Digit Recognizer
|
11,691,016 |
%matplotlib inline
<load_from_csv>
|
X_train = X_train / 255.0
test = test / 255.0
print("x_train shape: ",X_train.shape)
print("test shape: ",test.shape )
|
Digit Recognizer
|
11,691,016 |
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )<prepare_x_and_y>
|
Y_train = to_categorical(Y_train, num_classes = 10 )
|
Digit Recognizer
|
11,691,016 |
X = train_data.drop(["label"],axis = 1 ).values
Y = train_data["label"].values<prepare_x_and_y>
|
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1,random_state = 2)
print("x_train shape",X_train.shape)
print("x_test shape",X_val.shape)
print("y_train shape",Y_train.shape)
print("y_test shape",Y_val.shape )
|
Digit Recognizer
|
11,691,016 |
X = X.reshape([42000,28,28,1])
Y = Y.reshape([42000,1] )<categorify>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(3,3),activation = 'relu',
kernel_initializer='he_normal', input_shape =(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 32, kernel_size =(3,3),
activation = 'relu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size =(2,2)))
model.add(Dropout(0.4))
model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'Same',
activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'Same',
activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size =(2,2), strides =(2,2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation = 'softmax'))
|
Digit Recognizer
|
11,691,016 |
Y = to_categorical(Y, num_classes = 10 )<split>
|
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999 )
|
Digit Recognizer
|
11,691,016 |
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.1, random_state = 14 )<feature_engineering>
|
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
11,691,016 |
x_train = x_train/255
x_test = x_test/255<choose_model_class>
|
epochs = 39
batch_size = 64
|
Digit Recognizer
|
11,691,016 |
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64,(3,3), padding = 'same', activation='relu', input_shape=(28, 28, 1)) ,
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(64,(3,3), padding = 'same', activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(128,(3,3), padding = 'same', activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(128,(3,3), padding = 'same', activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
] )<choose_model_class>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.10,
zoom_range = 0.10,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
11,691,016 |
optimizer = Adam(learning_rate = 0.001, beta_1 = 0.9, beta_2 = 0.999)
model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics = ['accuracy'] )<choose_model_class>
|
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[annealer] )
|
Digit Recognizer
|
11,691,016 |
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.6,
min_lr=0.00001 )<define_variables>
|
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("sub.csv",index=False)
|
Digit Recognizer
|
11,664,799 |
batch_size = 64
epochs = 30<train_model>
|
train = pd.read_csv(r".. /input/digit-recognizer/train.csv",dtype = np.float32)
train_label = train.label.values
train_image = train.loc[:,train.columns != "label"].values/255
train_image, valid_image, train_label, valid_label = train_test_split(train_image,
train_label,
test_size = 0.2,
random_state = 7)
train_image = torch.from_numpy(train_image)
train_label = torch.from_numpy(train_label ).type(torch.LongTensor)
valid_image = torch.from_numpy(valid_image)
valid_label = torch.from_numpy(valid_label ).type(torch.LongTensor)
batch_size = 100
n_iters = 15000
num_epochs = n_iters /(len(train_image)/ batch_size)
num_epochs = int(num_epochs)
print('num_epochs',num_epochs)
train = torch.utils.data.TensorDataset(train_image,train_label)
valid = torch.utils.data.TensorDataset(valid_image,valid_label)
train_loader = DataLoader(train, batch_size = batch_size, shuffle = False)
valid_loader = DataLoader(valid, batch_size = batch_size, shuffle = False)
|
Digit Recognizer
|
11,664,799 |
train_datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range = 0.1,
horizontal_flip=False,
vertical_flip=False
)
train_datagen.fit(x_train )<train_model>
|
class res18model(nn.Module):
def __init__(self, num_classes=10):
super().__init__()
self.backbone = torchvision.models.resnet18(pretrained=True)
self.backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
bias=False)
in_features = self.backbone.fc.in_features
self.logit = nn.Linear(in_features, num_classes)
def forward(self, x):
batch_size, C, H, W = x.shape
x = self.backbone.conv1(x)
x = self.backbone.bn1(x)
x = self.backbone.relu(x)
x = self.backbone.maxpool(x)
x = self.backbone.layer1(x)
x = self.backbone.layer2(x)
x = self.backbone.layer3(x)
x = self.backbone.layer4(x)
x = F.adaptive_avg_pool2d(x,1 ).reshape(batch_size,-1)
x = F.dropout(x, 0.25, self.training)
x = self.logit(x)
return x
|
Digit Recognizer
|
11,664,799 |
history = model.fit(
train_datagen.flow(x_train,y_train,batch_size = batch_size),
validation_data =(x_test,y_test),
batch_size = batch_size,
steps_per_epoch = x_train.shape[0]//batch_size,
epochs = epochs,
verbose = 1,
callbacks=[learning_rate_reduction]
)<compute_test_metric>
|
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
|
Digit Recognizer
|
11,664,799 |
model.evaluate(x_test,y_test )<predict_on_test>
|
model = res18model(num_classes=10)
model.to(device)
criterion = nn.CrossEntropyLoss()
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters() , lr=learning_rate, momentum=0.9 )
|
Digit Recognizer
|
11,664,799 |
test_pred = model.predict(data)
test_pred = np.argmax(test_pred,axis=1)
print(test_pred.shape )<load_from_csv>
|
count = 0
for epoch in range(num_epochs):
for i,(images, labels)in enumerate(train_loader):
train = images.view(100,1,28,28)
train = train.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(train)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
count += 1
if count % 500 == 0:
correct = 0
total = 0
for images, labels in valid_loader:
test = images.view(100,1,28,28)
test = test.to(device)
labels = labels.to(device)
outputs = model(test)
predicted = torch.max(outputs.data, 1)[1]
total += len(labels)
correct +=(predicted == labels ).sum()
accuracy = 100 * correct / float(total)
print('Iteration: {} Loss: {} Accuracy: {} %'.format(count, loss.data, accuracy))
|
Digit Recognizer
|
11,664,799 |
sample_submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
sample_submission<prepare_output>
|
test = pd.read_csv(r".. /input/digit-recognizer/test.csv",dtype = np.float32)
test_image = test.loc[:,:].values/255
test_image = torch.from_numpy(test_image)
test_loader = DataLoader(test_image, batch_size = batch_size, shuffle = False)
|
Digit Recognizer
|
11,664,799 |
index = sample_submission.ImageId
data = {'ImageId' : index,'Label': test_pred}
df = pd.DataFrame(data)
df.head<save_to_csv>
|
submission_df = pd.DataFrame(submission)
submission_df.columns = submission_df.iloc[0]
submission_df = submission_df.drop(0, axis=0)
submission_df.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
11,456,822 |
df.to_csv('submission.csv', index=False )<import_modules>
|
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras import layers, Sequential, optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
|
Digit Recognizer
|
11,456,822 |
import pandas as pd<load_from_csv>
|
Digit Recognizer
|
|
11,456,822 |
mnist_test = pd.read_csv("/kaggle/input/mnist-fashion-data-classification/mnist_test.csv")
mnist_train = pd.read_csv("/kaggle/input/mnist-fashion-data-classification/mnist_train.csv")
<load_from_csv>
|
raw_csv = "/kaggle/input/digit-recognizer/train.csv"
test_csv = "/kaggle/input/digit-recognizer/test.csv"
raw_df = pd.read_csv(raw_csv)
test_df = pd.read_csv(test_csv )
|
Digit Recognizer
|
11,456,822 |
sample_submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )<feature_engineering>
|
def get_image_and_label(data_frame):
    """Convert a flat MNIST-style DataFrame into image tensors plus labels.

    Each row holds 784 pixel values, optionally alongside a 'label' column.
    Returns a pair ``(images, labels)`` where ``images`` has shape
    (n, 28, 28, 1) and ``labels`` is ``None`` when no 'label' column exists.
    """
    has_labels = 'label' in data_frame.columns
    pixels = data_frame.drop(["label"], axis=1).values if has_labels else data_frame.values
    # One single-channel 28x28 image per row (row-major reshape).
    images = np.expand_dims(pixels.reshape(-1, 28, 28), axis=3)
    targets = data_frame['label'].values if has_labels else None
    return images, targets
|
Digit Recognizer
|
11,456,822 |
test['dataset'] = 'test'<feature_engineering>
|
raw_IMGs, raw_labels = get_image_and_label(raw_df)
test_IMGs, _ = get_image_and_label(test_df )
|
Digit Recognizer
|
11,456,822 |
train['dataset'] = 'train'<concatenate>
|
classes = len(set(raw_labels))
classes
|
Digit Recognizer
|
11,456,822 |
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()<concatenate>
|
raw_labels = to_categorical(raw_labels, num_classes=classes)
|
Digit Recognizer
|
11,456,822 |
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True)
labels = mnist['label'].values
mnist.drop('label', axis=1, inplace=True)
mnist.columns = cols<sort_values>
|
train_IMGs, validation_IMGs, trian_labels, validation_labels = \
train_test_split(raw_IMGs, raw_labels, test_size=0.1, random_state=42)
|
Digit Recognizer
|
11,456,822 |
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index
dataset_from = dataset.sort_values(by=list(mnist.columns)) ['dataset'].values
original_idx = dataset.sort_values(by=list(mnist.columns)) ['index'].values<feature_engineering>
|
model = Sequential([
layers.Conv2D(64,(3,3), activation="relu", input_shape=(28,28,1)) ,
layers.BatchNormalization() ,
layers.MaxPooling2D(( 2,2)) ,
layers.Conv2D(128,(3, 3), activation="relu"),
layers.BatchNormalization() ,
layers.MaxPooling2D(( 2,2)) ,
layers.Conv2D(256,(3,3), activation="relu"),
layers.BatchNormalization() ,
layers.Flatten() ,
layers.Dense(1024, activation="relu"),
layers.Dropout(0.2),
layers.Dense(256, activation="relu"),
layers.Dropout(0.2),
layers.Dense(64, activation="relu"),
layers.Dropout(0.2),
layers.Dense(32, activation="relu"),
layers.Dropout(0.2),
layers.Dense(16, activation="relu"),
layers.Dropout(0.2),
layers.Dense(int(classes), activation="softmax")
])
|
Digit Recognizer
|
11,456,822 |
for i in range(len(idx_mnist)) :
if dataset_from[i] == 'test':
sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]<save_to_csv>
|
model.compile(loss="categorical_crossentropy",
optimizer=optimizers.Adam(learning_rate=1e-4),
metrics=['accuracy'] )
|
Digit Recognizer
|
11,456,822 |
sample_submission.to_csv('submission.csv', index=False )<load_from_csv>
|
# Data pipelines: the training generator applies random geometric augmentation
# on top of the 1/255 rescale; validation and test generators only rescale,
# so evaluation sees unmodified images.
train_datagen = ImageDataGenerator(
    rescale=1/255,
    rotation_range=20,       # degrees
    width_shift_range=0.1,   # fraction of width
    height_shift_range=0.1,  # fraction of height
    zoom_range=0.1,
    shear_range=0.1
)
validation_datagen = ImageDataGenerator(rescale=1/255)
test_datagen = ImageDataGenerator(rescale=1/255 )
|
Digit Recognizer
|
11,456,822 |
# Reproducibility / display setup, then load the Kaggle digit data as
# (n, 28, 28) arrays; the raw DataFrames are dropped to reclaim memory.
tf.random.set_seed(42)
%matplotlib inline
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', None)
pd.set_option('float_format', '{:f}'.format)
mpl.rcParams['figure.dpi'] = 600
warnings.filterwarnings('ignore')
tf.get_logger().setLevel('INFO')
# NOTE(review): the '.. /input' paths contain a stray space — likely a
# copy/paste artifact of '../input'.
train_df = pd.read_csv('.. /input/digit-recognizer/train.csv')
test_df = pd.read_csv('.. /input/digit-recognizer/test.csv')
submission = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv', index_col = 'ImageId')
x = train_df.drop(columns = 'label' ).values.reshape(-1, 28, 28)
y = train_df['label'].values
x_test = test_df.values.reshape(-1, 28, 28)
n_labels = len(np.unique(y))  # number of distinct digit classes
del train_df, test_df  # free the large frames; only the arrays are needed
gc.collect()
|
# Build the train/validation/test pipelines and train the model.
# Fix: the validation generator previously used `train_datagen`, so validation
# images were randomly rotated/shifted/zoomed every epoch, making val metrics
# noisy and pessimistic. Validation must only be rescaled — exactly what the
# already-defined `validation_datagen` does.
# NOTE(review): `trian_labels` is a typo for `train_labels`, but it matches the
# name bound at the train/validation split — renaming requires changing both.
train_generator = train_datagen.flow(train_IMGs, trian_labels, batch_size=32)
validation_generator = validation_datagen.flow(validation_IMGs, validation_labels, batch_size=32)
test_generator = test_datagen.flow(test_IMGs, batch_size=32, shuffle=False)
history = model.fit_generator(train_generator, epochs=150, validation_data=validation_generator, verbose=1 )
|
Digit Recognizer
|
11,456,822 |
def define_model(input_shape, n_classes, n_conv_branches, dropout):
    """Build a multi-branch CNN classifier (Keras functional API).

    Each of the ``n_conv_branches`` identical branches stacks four
    Conv2D(32/64/128/128) + MaxPool + ReLU + Dropout stages on the rescaled
    input; branch outputs are concatenated, flattened, passed through a
    128-unit dense head, and projected to ``n_classes`` raw logits
    (no softmax — pair with a from_logits loss).
    """
    inputs = layers.Input(shape = input_shape)
    # Normalise raw 0-255 pixels once, shared by every branch.
    scaled = layers.experimental.preprocessing.Rescaling(1./ 255 )(inputs)
    branch_outputs = []
    for _ in range(n_conv_branches):
        tensor = scaled
        for n_filters in (32, 64, 128, 128):
            tensor = layers.Conv2D(
                filters = n_filters,
                kernel_size = 3,
                padding = 'same',
            )(tensor)
            tensor = layers.MaxPool2D(pool_size =(2, 2))(tensor)
            tensor = layers.ReLU()(tensor)
            tensor = layers.Dropout(dropout )(tensor)
        branch_outputs.append(tensor)
    # Merge branches only when there is more than one.
    if n_conv_branches > 1:
        head = layers.Flatten()(layers.concatenate(branch_outputs))
    else:
        head = layers.Flatten()(branch_outputs[0])
    head = layers.Dense(units = 128 )(head)
    head = layers.BatchNormalization()(head)
    head = layers.ReLU()(head)
    head = layers.Dropout(dropout )(head)
    outputs = layers.Dense(units = n_classes )(head)
    return Model(inputs, outputs )
|
# Pull per-epoch curves out of the Keras History object for plotting.
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(accuracy))  # x-axis: one point per completed epoch
|
Digit Recognizer
|
11,456,822 |
# Stratified 10-fold cross-validation: train one CNN per fold, checkpoint the
# best val-accuracy weights, and average softmax test predictions over folds.
N_SPLITS = 10
CHECKPOINT_DIR = './checkpoint'
cv = StratifiedKFold(n_splits = N_SPLITS, random_state = 42, shuffle = True)
# NOTE(review): despite the name, `oof_pred` accumulates *test-set*
# probabilities averaged across folds, not out-of-fold train predictions.
oof_pred = np.zeros(( x_test.shape[0], n_labels))
cv_val_scores = np.zeros(N_SPLITS)  # per-fold validation accuracy
histories = []
k = 0
for train_i, val_i in cv.split(x, y):
    x_train = x[train_i, :]
    x_valid = x[val_i, :]
    y_train = y[train_i]
    y_valid = y[val_i]
    # Fresh model each fold: (H, W, 1) input, 2 conv branches, dropout 0.2.
    model = define_model(( x.shape[1], x.shape[2], 1), n_labels, 2, 0.2)
    gc.collect()
    optimizer = Adam(
        learning_rate = 5e-4,
    )
    # Model outputs raw logits, hence from_logits=True for numerical stability.
    model.compile(
        optimizer = optimizer,
        loss = SparseCategoricalCrossentropy(from_logits = True),
        metrics = ['accuracy']
    )
    # Keep only the best-val-accuracy weights. Every fold reuses the same
    # checkpoint path, so each fold overwrites the previous fold's files.
    checkpoint_call = ModelCheckpoint(
        filepath = CHECKPOINT_DIR,
        save_weights_only = True,
        monitor = 'val_accuracy',
        mode = 'max',
        save_best_only = True
    )
    stopping_call = EarlyStopping(
        monitor = 'val_accuracy',
        patience = 50,
        mode = 'max'
    )
    history = model.fit(
        x_train, y_train,
        validation_data =(x_valid, y_valid),
        epochs = 200,
        callbacks = [checkpoint_call, stopping_call],
        batch_size = 1024,
    )
    histories += [history]
    # Restore the best weights before scoring and predicting.
    model.load_weights(CHECKPOINT_DIR)
    # Append a Softmax layer so fold predictions are probabilities that can
    # be averaged meaningfully.
    predictor_model = tf.keras.Sequential([model, layers.Softmax() ])
    cv_val_scores[k] = model.evaluate(x_valid, y_valid)[1]
    oof_pred += predictor_model.predict(x_test)/ N_SPLITS
    k += 1
|
pred_labels = model.predict_generator(test_generator )
|
Digit Recognizer
|
11,456,822 |
print('Validation AUC: {:.6} ± {:.4}'.format(cv_val_scores.mean() , cv_val_scores.std()))<save_to_csv>
|
pred_labels = np.argmax(pred_labels, axis=-1)
pred_labels
|
Digit Recognizer
|
11,456,822 |
submission.loc[:, 'Label'] = np.argmax(oof_pred, axis = 1)
submission.to_csv('submission.csv' )<load_from_csv>
|
my_submission = pd.DataFrame({'ImageId': test_df.index + 1, 'Label': pred_labels})
my_submission.head()
|
Digit Recognizer
|
11,456,822 |
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
train_image = np.array(train.drop(['label'], axis=1), dtype="float32")/ 255
train_image = train_image.reshape(-1, 28, 28, 1)
train_label = tf.keras.utils.to_categorical(train['label'])
test = np.array(test, dtype="float32")/ 255
test = test.reshape(-1, 28, 28, 1)
show_images(train_image[:25], train_label[:25], shape=(5,5))<train_model>
|
my_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,485,474 |
# Augment the Kaggle training set with the full 70k-image Keras MNIST corpus
# (train + test splits merged), normalised and one-hot encoded the same way.
# NOTE(review): if Kaggle's hidden test images are drawn from MNIST, training
# on the full MNIST corpus may leak test labels into training — verify the
# competition rules before relying on the resulting score.
( image_train_mnist, label_train_mnist),(image_test_mnist, label_test_mnist)= mnist.load_data()
image_mnist = np.concatenate(( image_train_mnist, image_test_mnist))
label_mnist = np.concatenate(( label_train_mnist, label_test_mnist))
image_mnist = image_mnist.reshape(-1,28,28,1)
image_mnist = image_mnist.astype(np.float32)/ 255  # match Kaggle pixel scaling
label_mnist = tf.keras.utils.to_categorical(label_mnist,num_classes=10)
images = np.concatenate(( train_image, image_mnist))
labels = np.concatenate(( train_label, label_mnist))
print("training image dataset shape:", images.shape)
print("training label dataset shape:", labels.shape)
show_images(images[:25], labels[:25], shape=(5,5))
|
data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv' )
|
Digit Recognizer
|
11,485,474 |
# One augmenting generator with an internal 75/25 train/validation split.
# NOTE(review): with a single augmented datagen plus validation_split, the
# validation subset is also augmented on every epoch — confirm this is
# intended; a separate non-augmenting datagen is the usual alternative.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=20,        # degrees
    width_shift_range=0.20,   # fraction of width
    shear_range=15,
    zoom_range=0.10,
    validation_split=0.25,
    horizontal_flip=False     # mirrored digits would be invalid classes
)
train_generator = datagen.flow(
    images,
    labels,
    batch_size=256,
    subset='training',
)
validation_generator = datagen.flow(
    images,
    labels,
    batch_size=64,
    subset='validation',
)
|
MinMaxScaler = preprocessing.MinMaxScaler()
input_data = MinMaxScaler.fit_transform(input_data )
|
Digit Recognizer
|
11,485,474 |
def create_model() :
    """CNN classifier for 10-class digit recognition on 28x28 images.

    Accepts flat 784-pixel rows — the leading Reshape restores the
    (28, 28, 1) image shape before the conv stack.

    Fix: the output layer now uses softmax instead of sigmoid. With
    categorical_crossentropy over mutually exclusive classes the outputs
    must form a probability distribution over the 10 digits; independent
    sigmoids do not sum to 1 and mis-state the loss.
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Reshape(( 28, 28, 1)) ,
        tf.keras.layers.Conv2D(filters=32, kernel_size=(5,5), activation="relu", padding="same", input_shape=(28,28,1)) ,
        tf.keras.layers.MaxPool2D(( 2,2)) ,
        tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu", padding="same"),
        tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu", padding="same"),
        tf.keras.layers.MaxPool2D(( 2,2)) ,
        tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), activation="relu", padding="same"),
        tf.keras.layers.Conv2D(filters=128, kernel_size=(3,3), activation="relu", padding="same"),
        tf.keras.layers.MaxPool2D(( 2,2)) ,
        tf.keras.layers.Flatten() ,
        # NOTE(review): sigmoid hidden activations are unusual (ReLU is the
        # common choice) but are kept to preserve the author's architecture.
        tf.keras.layers.Dense(512, activation="sigmoid"),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Dense(512, activation="sigmoid"),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Dense(256, activation="sigmoid"),
        tf.keras.layers.Dropout(0.1),
        tf.keras.layers.Dense(10, activation="softmax")
    ])
    model.compile(
        optimizer="adam", loss = 'categorical_crossentropy', metrics = ['accuracy']
    )
    return model
model = create_model()
|
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
11,485,474 |
history = model.fit_generator(train_generator, epochs=60, validation_data=validation_generator, callbacks=[reduce_lr,checkpoint], verbose=1 )<save_to_csv>
|
# Scale the test pixels with the scaler already fitted on the training data.
# Fix: was fit_transform(), which re-fits min/max on the test distribution —
# test inputs would then be scaled differently from what the model was
# trained on. transform() reuses the training-set statistics.
# NOTE(review): `MinMaxScaler` here is the fitted *instance* (it shadows the
# sklearn class name) — consider renaming it e.g. `pixel_scaler`.
test_data = MinMaxScaler.transform(test)
test_data = test_data.reshape(-1, 28, 28, 1)
test_data.shape
|
Digit Recognizer
|
11,485,474 |
df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" ).astype("float32")/ 255.0
res = tf.keras.backend.argmax(model.predict(df))
csv = pd.DataFrame({'ImageId': range(1, len(res)+ 1), "Label": res})
csv.to_csv('submission.csv', index=False )<import_modules>
|
X_train, X_test, y_train, y_test = train_test_split(input_data, output_data.values, test_size=0.3, random_state=1 )
|
Digit Recognizer
|
11,485,474 |
import numpy as np
import pandas as pd
import scipy
import sklearn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from itertools import product
from sklearn.cluster import KMeans
import lightgbm as lgb<define_variables>
|
from keras.utils.np_utils import to_categorical
|
Digit Recognizer
|
11,485,474 |
dpath = '.. /input/competitive-data-science-predict-future-sales/'
adpath ='.. /input/predict-future-sales/'<load_from_csv>
|
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10 )
|
Digit Recognizer
|
11,485,474 |
df_train = pd.read_csv(dpath + 'sales_train.csv')
df_test = pd.read_csv(dpath + 'test.csv', index_col='ID')
df_shops = pd.read_csv(dpath + 'shops.csv', index_col='shop_id')
df_items = pd.read_csv(dpath + 'items.csv', index_col='item_id')
df_itemcat = pd.read_csv(dpath + 'item_categories.csv', index_col='item_category_id')
sample_submission = pd.read_csv(dpath + 'sample_submission.csv', index_col='ID' )<load_from_csv>
|
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout, AveragePooling2D
import keras.backend as K
|
Digit Recognizer
|
11,485,474 |
calendar = pd.read_csv(adpath + 'calendar.csv', dtype='int16' )<categorify>
|
# Augmentation for handwritten-digit images.
# Fix: horizontal_flip must be False. Mirrored digits (2, 3, 7, ...) are not
# valid members of any class, so flipping teaches the model wrong shapes;
# the same notebook disables it elsewhere for the same reason.
generator = ImageDataGenerator(
    width_shift_range=0.1,
    height_shift_range=0.1,
    rotation_range = 20,   # degrees
    shear_range = 0.3,
    zoom_range = 0.3,
    horizontal_flip = False)
generator.fit(X_train )
|
Digit Recognizer
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.