kernel_id (int64, 24.2k-23.3M) | prompt (string, lengths 8-1.85M) | completion (string, lengths 1-182k) | comp_name (string, lengths 5-57)
---|---|---|---
3,729,811 |
model = CNNStacking(CFG['target_size'])
states = [torch.load(STAGE2_DIR+f'/fold{fold}_best.pth')for fold in CFG['trn_fold']]
test_dataset = StackingDataset(stage1_predictions)
test_loader = DataLoader(test_dataset, batch_size=CFG['batch_size'], shuffle=False,
num_workers=CFG['num_workers'], pin_memory=True)
predictions = inference(model, states, test_loader, device)
test['label'] = predictions.argmax(1)
test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False )<import_modules>
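Note: CNNStacking, StackingDataset, CFG, STAGE2_DIR and the inference helper used above are defined elsewhere in the original kernel. A minimal sketch of what such an inference helper might look like is given below; averaging softmax outputs over the fold checkpoints and the 'model' key in each checkpoint are assumptions, not the original implementation.
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm

def inference(model, states, test_loader, device):
    # Sketch: for each batch, average the softmax probabilities of every fold checkpoint.
    model.to(device)
    probs = []
    for images in tqdm(test_loader, total=len(test_loader)):
        images = images.to(device)
        fold_preds = []
        for state in states:
            model.load_state_dict(state['model'])  # assumed checkpoint layout
            model.eval()
            with torch.no_grad():
                logits = model(images)
            fold_preds.append(F.softmax(logits, dim=1).cpu().numpy())
        probs.append(np.mean(fold_preds, axis=0))
    return np.concatenate(probs)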
|
log_dir = '../work'
|
Digit Recognizer
|
3,729,811 |
from fastai.vision.all import *
import albumentations<define_variables>
|
train_df_n = train_df.astype('float32')/ 255
test_df_n = test_df.astype('float32')/ 255
train_np_n = train_df_n.values
test_np_n = test_df_n.values
print(train_np_n.shape)
print(test_np_n.shape )
|
Digit Recognizer
|
3,729,811 |
set_seed(42 )<categorify>
|
X_train, X_val, y_train, y_val = train_test_split(train_np_n, train_label_np, test_size=0.25, random_state=42)
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape )
|
Digit Recognizer
|
3,729,811 |
class AlbumentationsTransform(RandTransform):
"A transform handler for multiple `Albumentation` transforms"
split_idx,order=None,2
def __init__(self, train_aug, valid_aug): store_attr()
def before_call(self, b, split_idx):
self.idx = split_idx
def encodes(self, img: PILImage):
if self.idx == 0:
aug_img = self.train_aug(image=np.array(img)) ['image']
else:
aug_img = self.valid_aug(image=np.array(img)) ['image']
return PILImage.create(aug_img )<prepare_x_and_y>
|
print(y_train.shape)
print(y_val.shape)
Y_train = to_categorical(y_train)
Y_val = to_categorical(y_val)
print(Y_train.shape)
print(Y_val.shape )
|
Digit Recognizer
|
3,729,811 |
def get_x(row): return data_path/row['image_id']
def get_y(row): return row['label']<choose_model_class>
|
def step_decay_for_conv2(epoch):
x = 0.0005
if epoch >= 20: x = 0.0001
if epoch >= 40: x = 0.00005
return x
lr_decay = LearningRateScheduler(step_decay_for_conv2,verbose=0 )
|
Digit Recognizer
|
3,729,811 |
class CassavaModel(Module):
def __init__(self, num_classes):
self.effnet = EfficientNet.from_pretrained("efficientnet-b3")
self.dropout = nn.Dropout(0.1)
self.out = nn.Linear(1536, num_classes)
def forward(self, image):
batch_size, _, _, _ = image.shape
x = self.effnet.extract_features(image)
x = F.adaptive_avg_pool2d(x, 1 ).reshape(batch_size, -1)
outputs = self.out(self.dropout(x))
return outputs<define_variables>
|
def create_model2() :
inputs_mnist = Input(shape=(28,28,1))
inputs = Conv2D(filters=64, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs_mnist)
inputs = Conv2D(filters=128, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs)
inputs = BatchNormalization()(inputs)
inputs = MaxPooling2D(pool_size=(2,2))(inputs)
inputs = Conv2D(filters=128, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs)
inputs = Conv2D(filters=128, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs)
inputs = BatchNormalization()(inputs)
inputs = MaxPooling2D(pool_size=(2,2))(inputs)
inputs = Conv2D(filters=128, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs)
inputs = Conv2D(filters=128, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs)
inputs = BatchNormalization()(inputs)
inputs = MaxPooling2D(pool_size=(2,2))(inputs)
inputs = Conv2D(filters=128, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs)
inputs = Conv2D(filters=128, kernel_size=(3,3), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs)
inputs = BatchNormalization()(inputs)
inputs_last = MaxPooling2D(pool_size=(2,2))(inputs)
inputs2 = Conv2D(filters=64, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs_mnist)
inputs2 = Conv2D(filters=128, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs2)
inputs2 = BatchNormalization()(inputs2)
inputs2 = MaxPooling2D(pool_size=(2,2))(inputs2)
inputs2 = Conv2D(filters=128, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs2)
inputs2 = Conv2D(filters=128, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs2)
inputs2 = BatchNormalization()(inputs2)
inputs2 = MaxPooling2D(pool_size=(2,2))(inputs2)
inputs2 = Conv2D(filters=128, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs2)
inputs2 = Conv2D(filters=128, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs2)
inputs2 = BatchNormalization()(inputs2)
inputs2 = MaxPooling2D(pool_size=(2,2))(inputs2)
inputs2 = Conv2D(filters=128, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs2)
inputs2 = Conv2D(filters=128, kernel_size=(5,5), padding='same', bias_regularizer=regularizers.l2(0.005))(inputs2)
inputs2 = BatchNormalization()(inputs2)
inputs2_last = MaxPooling2D(pool_size=(2,2))(inputs2)
inputs_ad_ls = Flatten()(inputs_last)
inputs_ad_ls = Dense(units=4096, activation='relu' )(inputs_ad_ls)
inputs_ad_ls = Dropout(rate=0.5 )(inputs_ad_ls)
inputs_ad_ls = Dense(units=4096, activation='relu' )(inputs_ad_ls)
inputs_ad_ls = Dropout(rate=0.5 )(inputs_ad_ls)
outputs_ad_ls = Dense(units=10, activation='softmax', name='1st_fc' )(inputs_ad_ls)
inputs2_ad_ls = Flatten()(inputs2_last)
inputs2_ad_ls = Dense(units=4096, activation='relu' )(inputs2_ad_ls)
inputs2_ad_ls = Dropout(rate=0.5 )(inputs2_ad_ls)
inputs2_ad_ls = Dense(units=4096, activation='relu' )(inputs2_ad_ls)
inputs2_ad_ls = Dropout(rate=0.5 )(inputs2_ad_ls)
outputs2_ad_ls = Dense(units=10, activation='softmax', name='2nd_fc' )(inputs2_ad_ls)
inputs3 = keras.layers.concatenate([inputs_last, inputs2_last])
inputs_2_fc = Flatten()(inputs3)
inputs_2_fc = Dense(units=8192, activation='relu' )(inputs_2_fc)
inputs_2_fc = Dropout(rate=0.5 )(inputs_2_fc)
inputs_2_fc = Dense(units=4096, activation='relu' )(inputs_2_fc)
inputs_2_fc = Dropout(rate=0.5 )(inputs_2_fc)
inputs_2_fc = Dense(units=4096, activation='relu' )(inputs_2_fc)
inputs_2_fc = Dropout(rate=0.5 )(inputs_2_fc)
outputs = Dense(units=10, activation='softmax', name='last_fc' )(inputs_2_fc)
model = Model(inputs=[inputs_mnist], outputs=[outputs, outputs_ad_ls, outputs2_ad_ls])
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'], loss_weights=[1.0, 0.2, 0.2])
model.summary()
return model
|
Digit Recognizer
|
3,729,811 |
Path('/kaggle/input' ).ls()<load_pretrained>
|
_model = create_model2()
|
Digit Recognizer
|
3,729,811 |
learn = load_learner(Path('/kaggle/input/effnet-inference/inference(1)'), cpu=False )<define_variables>
|
def fit_the_model(_model, _epochs):
original_hist = _model.fit(np.array(X_train), [np.array(Y_train),np.array(Y_train),np.array(Y_train)], epochs=_epochs, batch_size=batch_size,
verbose=1,
callbacks=[lr_decay],
validation_data=(np.array(X_val), [np.array(Y_val),np.array(Y_val),np.array(Y_val)]))
return original_hist
|
Digit Recognizer
|
3,729,811 |
path = Path("../input")
data_path = path/'cassava-leaf-disease-classification'<load_from_csv>
|
def fit_the_model_with_data(_model, _epochs, X_train, Y_train, X_val, Y_val, _cp):
original_hist = _model.fit(np.array(X_train), [np.array(Y_train),np.array(Y_train),np.array(Y_train)], epochs=_epochs, batch_size=batch_size,
verbose=0,
callbacks=[lr_decay, _cp],
validation_data=(np.array(X_val), [np.array(Y_val),np.array(Y_val),np.array(Y_val)]))
return original_hist
|
Digit Recognizer
|
3,729,811 |
test_df = pd.read_csv(data_path/'sample_submission.csv')
test_df.head()<prepare_output>
|
from sklearn.model_selection import KFold
import numpy as np
|
Digit Recognizer
|
3,729,811 |
test_copy = test_df.copy()
test_copy['image_id'] = test_copy['image_id'].apply(lambda x: f'test_images/{x}' )<train_model>
|
X = np.copy(train_np_n)
y = np.copy(train_label_np)
kf = KFold(n_splits=4,shuffle=True)
kf.get_n_splits(X)
print(kf )
|
Digit Recognizer
|
3,729,811 |
test_dl = learn.dls.test_dl(test_copy )<predict_on_test>
|
_i = 0
for train_index, val_index in kf.split(X):
filepath=".. /work/kfold_cp"+str(_i)+".hdf5"
_cp = ModelCheckpoint(filepath, monitor='val_last_fc_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='max', period=1)
_m = create_model2() ;
X_t, X_v = X[train_index], X[val_index]
y_t, y_v = y[train_index], y[val_index]
X_t = X_t.reshape(X_t.shape[0],28,28,1)
X_v = X_v.reshape(X_v.shape[0],28,28,1)
Y_t = to_categorical(y_t)
Y_v = to_categorical(y_v)
print(X_t.shape, y_t.shape, Y_t.shape, X_v.shape, y_v.shape, Y_v.shape)
epochs=100
history = fit_the_model_with_data(_m, epochs, X_t, Y_t, X_v, Y_v, _cp)
check_result(history.history);
_m.save(".. /work/"+"kfold"+str(_i)+'.h5')
_i += 1
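The loop above calls check_result(history.history), which is not shown in this extract. A plausible sketch (the printed and plotted keys are assumptions based on the 'last_fc' output name and the val_last_fc_acc monitor used by the checkpoint callback) is:
import matplotlib.pyplot as plt

def check_result(history_dict):
    # Hypothetical helper: report the best validation accuracy of the final head
    # and plot the training/validation loss recorded by model.fit.
    if 'val_last_fc_acc' in history_dict:
        print('best val_last_fc_acc: {:.4f}'.format(max(history_dict['val_last_fc_acc'])))
    plt.plot(history_dict.get('loss', []), label='train loss')
    plt.plot(history_dict.get('val_loss', []), label='val loss')
    plt.legend()
    plt.show()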
|
Digit Recognizer
|
3,729,811 |
preds, _ = learn.get_preds(dl=test_dl )<feature_engineering>
|
model_cp0 = load_model("../work/kfold_cp0.hdf5")
model_cp1 = load_model("../work/kfold_cp1.hdf5")
model_cp2 = load_model("../work/kfold_cp2.hdf5")
model_cp3 = load_model("../work/kfold_cp3.hdf5")
|
Digit Recognizer
|
3,729,811 |
test_df['label'] = preds.argmax(dim=-1 ).numpy()<save_to_csv>
|
def pred_argmax(_model, _data, _label=np.array([0])) :
c_prob, c_prob1, c_prob2 = _model.predict(_data)
pred= np.argmax(c_prob, axis=1)
if len(_label)!= 1:
correct = np.argmax(_label,axis=1)
else:
correct = 0
return c_prob, pred, correct
def pred_argmax2(_model,_gen, _data, _label=np.array([0])) :
_batch_size=128
_s = len(_data)/ _batch_size
c_prob = _model.predict_generator(_gen.flow(_data,batch_size=_batch_size,shuffle=None), steps=_s)
pred= np.argmax(c_prob, axis=1)
if len(_label)!= 1:
correct = np.argmax(_label,axis=1)
else:
correct = 0
return c_prob, pred, correct
def disp_image(_data, _pred, _correct, _list, _isdisplay="off"):
pair_list = []
wrong_list = []
for _i in _list:
if _isdisplay == "on":
plt.imshow(_data[_i].reshape(28, 28), cmap=plt.get_cmap('gray'))
plt.show()
pair_list.append(str(_correct[_i])+" as "+str(_pred[_i]))
wrong_list.append(_pred[_i])
return pair_list, wrong_list
|
Digit Recognizer
|
3,729,811 |
test_df.to_csv('submission.csv', index=False )<define_variables>
|
X_all = train_np_n.reshape(train_np_n.shape[0], 28, 28, 1)
Y_all = to_categorical(train_label_np)
print(X_all.shape)
print(Y_all.shape)
|
Digit Recognizer
|
3,729,811 |
package_paths = [
'../input/pytorch-image-models/pytorch-image-models-master'
]
for pth in package_paths:
sys.path.append(pth )<import_modules>
|
X0_cp_all_prob, X0_cp_all_pred, X0_cp_all_correct = pred_argmax(model_cp0, X_all,Y_all)
X1_cp_all_prob, X1_cp_all_pred, X1_cp_all_correct = pred_argmax(model_cp1, X_all,Y_all)
X2_cp_all_prob, X2_cp_all_pred, X2_cp_all_correct = pred_argmax(model_cp2, X_all,Y_all)
X3_cp_all_prob, X3_cp_all_pred, X3_cp_all_correct = pred_argmax(model_cp3, X_all,Y_all)
_d0_cp = np.not_equal(X0_cp_all_pred, X0_cp_all_correct)
_d1_cp = np.not_equal(X1_cp_all_pred, X1_cp_all_correct)
_d2_cp = np.not_equal(X2_cp_all_pred, X2_cp_all_correct)
_d3_cp = np.not_equal(X3_cp_all_pred, X3_cp_all_correct)
print(sum(_d0_cp))
print(sum(_d1_cp))
print(sum(_d2_cp))
print(sum(_d3_cp))
DF_cp = np.argmin([sum(_d0_cp),sum(_d1_cp),sum(_d2_cp),sum(_d3_cp)], axis=0)
print(DF_cp )
|
Digit Recognizer
|
3,729,811 |
<load_from_csv>
|
X0_test_prob, X0_test_pred, X0_test_correct = pred_argmax(model_cp0, X_test)
X1_test_prob, X1_test_pred, X1_test_correct = pred_argmax(model_cp1, X_test)
X2_test_prob, X2_test_pred, X2_test_correct = pred_argmax(model_cp2, X_test)
X3_test_prob, X3_test_pred, X3_test_correct = pred_argmax(model_cp3, X_test)
|
Digit Recognizer
|
3,729,811 |
submission = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
submission.head()<set_options>
|
pred_info_test = np.concatenate([X0_test_pred.reshape(X0_test_pred.shape[0],1), X1_test_pred.reshape(X1_test_pred.shape[0],1), X2_test_pred.reshape(X2_test_pred.shape[0],1), X3_test_pred.reshape(X3_test_pred.shape[0],1)], axis=1)
print(pred_info_test.shape)
print(pred_info_test )
|
Digit Recognizer
|
3,729,811 |
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def get_img(path):
im_bgr = cv2.imread(path)
im_rgb = im_bgr[:, :, ::-1]
return im_rgb
from albumentations import (
ShiftScaleRotate, Normalize, Compose, CenterCrop, Resize, HorizontalFlip,
VerticalFlip, Transpose, RandomResizedCrop, HueSaturationValue, RandomBrightnessContrast,
CoarseDropout, Cutout
)
from albumentations.pytorch import ToTensorV2
def get_infer_transforms() :
return Compose([
CenterCrop(config['img_size'], config['img_size'], p=1.0),
Resize(config['img_size'], config['img_size']),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
ToTensorV2(p=1.0),
], p=1.0 )<categorify>
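get_infer_transforms and the inference cell further down read from a config dict that is not included in this extract. The values below are illustrative assumptions consistent with how the keys are used (num_classes=5 matches the Cassava competition, model_arch matches the checkpoint filename loaded later); they are not the original settings.
import torch

config = {
    'seed': 42,              # assumed
    'img_size': 512,         # assumed
    'model_arch': 'tf_efficientnet_b4_ns',
    'num_classes': 5,
    'test_bs': 32,           # assumed
    'num_workers': 4,        # assumed
    'device': 'cuda:0' if torch.cuda.is_available() else 'cpu',
}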
|
def return_result(preds):
_c = Counter(preds)
_v = _c.most_common(1)[0][0]
_n = _c.most_common(1)[0][1]
if _n == 4 or _n ==3:
return _v
else:
return preds[DF_cp]
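A small illustration of the voting rule above (values invented for illustration): if at least three of the four fold models agree, the majority label is returned, otherwise the prediction of the best fold (index DF_cp computed earlier) is used.
# Suppose DF_cp computed above is 2.
print(return_result(np.array([7, 7, 1, 7])))  # three folds agree -> 7
print(return_result(np.array([7, 1, 4, 9])))  # no majority -> preds[DF_cp], i.e. 4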
|
Digit Recognizer
|
3,729,811 |
class LeafDataset(Dataset):
def __init__(self, df, img_dir, transforms=None, include_labels=True):
super().__init__()
self.df = df
self.img_dir = img_dir
self.transforms = transforms
self.include_labels = include_labels
if include_labels:
self.labels = self.df['label'].values
def __len__(self):
return len(self.df)
def __getitem__(self, index: int):
img = get_img("{}/{}".format(self.img_dir, self.df.loc[index]['image_id']))
if self.transforms:
img = self.transforms(image=img)['image']
if self.include_labels:
label = self.labels[index]
return img, label
else:
return img;<choose_model_class>
|
rdf_test = pd.DataFrame(pred_info_test ).apply(return_result, axis=1)
rdf_test
|
Digit Recognizer
|
3,729,811 |
class LeafDiseaseClassifier(nn.Module):
def __init__(self, model_arch, num_classes, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, num_classes)
def forward(self, x):
x = self.model(x)
return x
def freeze_batch_norm(self):
layers = [mod for mod in self.model.children() ]
for layer in layers:
if isinstance(layer, nn.BatchNorm2d):
for param in layer.parameters() :
param.requires_grad = False
            elif isinstance(layer, nn.Sequential):
                for seq_layer in layer.children():
                    if isinstance(seq_layer, nn.BatchNorm2d):
                        for param in seq_layer.parameters():
                            param.requires_grad = False<create_dataframe>
|
pred_df["label"]=rdf_test
pred_df
|
Digit Recognizer
|
3,729,811 |
if __name__ == '__main__':
seed_everything(config['seed'])
test = pd.DataFrame()
test['image_id'] = list(os.listdir('../input/cassava-leaf-disease-classification/test_images/'))
test_ds = LeafDataset(test, '../input/cassava-leaf-disease-classification/test_images/', transforms=get_infer_transforms(), include_labels=False)
test_loader = torch.utils.data.DataLoader(
test_ds,
batch_size=config['test_bs'],
num_workers=config['num_workers'],
shuffle=False,
)
device = torch.device(config['device'])
model = LeafDiseaseClassifier(config['model_arch'], config['num_classes'] ).to(device)
model.load_state_dict(torch.load('../input/effnet-b4/tf_efficientnet_b4_ns_Fold4_Epoch5_Acc_0.8952.pth'))
model.eval()
preds = []
pbar = tqdm(enumerate(test_loader), total=len(test_loader))
for step,(test_batch)in pbar:
test_batch = test_batch.to(device ).float()
test_preds = model(test_batch)
preds += [torch.softmax(test_preds, 1 ).detach().cpu().numpy() ]
preds = np.concatenate(preds, axis=0)
<save_to_csv>
|
pred_df.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
4,269,364 |
test['label'] = np.argmax(preds, axis=1)
test.head()
test.to_csv('submission.csv', index=False )<find_best_params>
|
dataset=pd.read_csv('../input/train.csv')
|
Digit Recognizer
|
4,269,364 |
del model
torch.cuda.empty_cache()<load_from_csv>
|
y=dataset['label']
|
Digit Recognizer
|
4,269,364 |
train=pd.read_csv(r'../input/bike-sharing-demand/train.csv')
test=pd.read_csv(r'../input/bike-sharing-demand/test.csv')
df=train.copy()
test_df=test.copy()
df.head()<feature_engineering>
|
X_train=dataset.iloc[:,1:]
|
Digit Recognizer
|
4,269,364 |
df['datetime'] = pd.to_datetime(df['datetime'])
test_df['datetime'] = pd.to_datetime(test_df['datetime'])
df['year'] = df['datetime'].apply(lambda x: x.year)
df['month'] = df['datetime'].apply(lambda x: x.month)
df['day'] = df['datetime'].apply(lambda x: x.day)
df['hour'] = df['datetime'].apply(lambda x: x.hour)
test_df['year'] = test_df['datetime'].apply(lambda x: x.year)
test_df['month'] = test_df['datetime'].apply(lambda x: x.month)
test_df['day'] = test_df['datetime'].apply(lambda x: x.day)
test_df['hour'] = test_df['datetime'].apply(lambda x: x.hour)
df.drop(['datetime', 'casual', 'registered'], axis=1, inplace=True)
test_df.drop(['datetime'], axis=1, inplace=True)
df<categorify>
|
X_test=pd.read_csv('../input/test.csv')
|
Digit Recognizer
|
4,269,364 |
df = pd.get_dummies(df, columns=['year', 'month', 'day', 'hour', 'holiday', 'workingday', 'season', 'weather'])
test_df = pd.get_dummies(test_df, columns=['year', 'month', 'day', 'hour', 'holiday', 'workingday', 'season', 'weather'])
df, test_df = df.align(test_df, join='left', axis=1)
test_df = test_df.drop(['count'], axis=1)
print(df.shape)
print(test_df.shape )<compute_test_metric>
|
X_train=X_train/255.0
X_test=X_test/255.0
|
Digit Recognizer
|
4,269,364 |
def rmsle(y, pred):
log_y = np.log1p(y)
log_pred = np.log1p(pred)
squared_error =(log_y - log_pred)**2
rmsle = np.sqrt(np.mean(squared_error))
return rmsle<train_on_grid>
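As a quick sanity check (not part of the original kernel), the hand-rolled rmsle above should agree with the square root of scikit-learn's mean_squared_log_error:
import numpy as np
from sklearn.metrics import mean_squared_log_error

y_true = np.array([3.0, 5.0, 2.5, 7.0])
y_hat = np.array([2.5, 5.0, 4.0, 8.0])
assert np.isclose(rmsle(y_true, y_hat), np.sqrt(mean_squared_log_error(y_true, y_hat)))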
|
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPool2D
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split, cross_val_score
import itertools
|
Digit Recognizer
|
4,269,364 |
df_train_target = df['count']
df_train_features = df.drop('count',axis=1)
def print_best_params(model, params):
grid_model = GridSearchCV(
model,
param_grid = params,
cv=5,
scoring='neg_mean_squared_error')
grid_model.fit(df_train_features, df_train_target)
rmse = np.sqrt(-1*grid_model.best_score_)
print(
'{0} 5-fold CV best average RMSE: {1}, best params: {2}'.format(model.__class__.__name__, np.round(rmse, 4), grid_model.best_params_))
return grid_model.best_estimator_<feature_engineering>
|
target=to_categorical(y,10 )
|
Digit Recognizer
|
4,269,364 |
df['count'] = np.log1p(df['count'] )<train_model>
|
X_t, X_v, Y_t, Y_v = train_test_split(X_train, target, test_size = 0.1 )
|
Digit Recognizer
|
4,269,364 |
x_train,x_test,y_train,y_test=train_test_split(df.drop('count',axis=1),df['count'],test_size=0.3,random_state=42)
lr_reg = LinearRegression()
lr_reg.fit(x_train, y_train)
lr_pred = lr_reg.predict(x_test)
y_test_exp = np.expm1(y_test)
lr_pred_exp = np.expm1(lr_pred)
print('LinearRegression RMSLE:', rmsle(y_test_exp, lr_pred_exp))
<compute_train_metric>
|
model=Sequential()
model.add(Conv2D(filters=64,kernel_size=(7,7),padding = 'Same',
activation ='relu', input_shape =(28,28,1)) )
|
Digit Recognizer
|
4,269,364 |
rf_model = RandomForestRegressor()
rf_model.fit(x_train, y_train)
rf_pred = rf_model.predict(x_test)
y_test_exp = np.expm1(y_test)
rf_pred_exp = np.expm1(rf_pred)
print('RandomForest RMSLE:', rmsle(y_test_exp, rf_pred_exp))<compute_test_metric>
|
model.add(Conv2D(filters=64,kernel_size=(7,7),padding = 'Same',
activation ='relu'))
|
Digit Recognizer
|
4,269,364 |
xgb_model = XGBRegressor(learning_rate=0.2)
xgb_model.fit(x_train, y_train)
xgb_pred = xgb_model.predict(x_test)
y_test_exp = np.expm1(y_test)
xgb_pred_exp = np.expm1(xgb_pred)
print('xgboost RMSLE:', rmsle(y_test_exp, xgb_pred_exp))<import_modules>
|
model.add(MaxPool2D(pool_size=(2,2)) )
|
Digit Recognizer
|
4,269,364 |
lgb_params = {
'learning_rate' : [0.05],
'n_estimators':[500],
'max_bin' : [80],
}
lgb_model = LGBMRegressor()
lgb_model.fit(x_train, y_train)
lgb_pred = lgb_model.predict(x_test)
y_test_exp = np.expm1(y_test)
lgb_pred_exp = np.expm1(lgb_pred)
print('LGBMRegressor RMSLE:', rmsle(y_test_exp,lgb_pred_exp))
lgb_estimator = print_best_params(lgb_model, lgb_params)
<prepare_x_and_y>
|
model.add(Dropout(0.3))
|
Digit Recognizer
|
4,269,364 |
X_train = df.drop(['count'], axis=1)
y_train = df['count']
X_test = test_df
X_test<load_from_csv>
|
model.add(Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)) )
|
Digit Recognizer
|
4,269,364 |
lgb_model = LGBMRegressor()
lgb_model.fit(X_train, y_train)
pred = lgb_model.predict(X_test)
pred_exp = np.expm1(pred)
submission = pd.read_csv('../input/bike-sharing-demand/sampleSubmission.csv')
submission.loc[:, 'count'] = pred_exp
submission<save_to_csv>
|
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(512, activation = "relu", use_bias= True))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
4,269,364 |
submission.to_csv('submission.csv', index=False )<save_to_csv>
|
model.compile(optimizer = 'adam' , loss = "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
4,269,364 |
submission.to_csv('submission.csv', index=False )<set_options>
|
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.2,
width_shift_range=0.2,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_t )
|
Digit Recognizer
|
4,269,364 |
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
<load_from_csv>
|
model.fit_generator(datagen.flow(X_t,Y_t, batch_size= 82),
epochs = 60, validation_data =(X_v,Y_v),
verbose = 2, steps_per_epoch=X_t.shape[0] // 82)
|
Digit Recognizer
|
4,269,364 |
train=pd.read_csv(r'../input/bike-sharing-demand/train.csv')
test=pd.read_csv(r'../input/bike-sharing-demand/test.csv')
df=train.copy()
test_df=test.copy()
df.head()<count_values>
|
result = model.predict(X_test)
|
Digit Recognizer
|
4,269,364 |
df.season.value_counts()<count_values>
|
result = np.argmax(result, axis=1)
|
Digit Recognizer
|
4,269,364 |
df.holiday.value_counts()
<count_values>
|
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), pd.Series(result, name="Label")], axis=1)
submission.to_csv("cnnmodel.csv", index=False)
|
Digit Recognizer
|
3,809,504 |
df.workingday.value_counts()
<count_values>
|
x_train = pd.read_csv('../input/train.csv')
x_train.head()
|
Digit Recognizer
|
3,809,504 |
df.weather.value_counts()
<categorify>
|
dim_x = 28
dim_y = 28
batch_size=32
x_train.shape
y_train = np.array(x_train['label'])
x_train.drop('label', axis = 1, inplace = True)
x_train = np.array(x_train.values)
print("data shapes", x_train.shape, y_train.shape, "classes: ",len(np.unique(y_train)))
classes = len(np.unique(y_train))
x_train = x_train.reshape(( -1, dim_x,dim_y,1))
print(np.unique(y_train))
y = np.zeros(( np.shape(y_train)[0],len(np.unique(y_train))))
for ii in range(len(y_train)) :
y[ii,y_train[ii]] = 1
y_train = y
|
Digit Recognizer
|
3,809,504 |
season=pd.get_dummies(df['season'],prefix='season')
df=pd.concat([df,season],axis=1)
df.head()
season=pd.get_dummies(test_df['season'],prefix='season')
test_df=pd.concat([test_df,season],axis=1)
test_df.head()<categorify>
|
no_validation = int(0.1 *(x_train.shape[0]))
x_val = x_train[0:no_validation,...]
y_val = y_train[0:no_validation,...]
x_train = x_train[no_validation:,...]
y_train = y_train[no_validation:,...]
print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)
train_datagen = ImageDataGenerator(rescale = 1./255,\
rotation_range=30,\
width_shift_range=0.025,\
height_shift_range=0.025,\
shear_range=0.35,\
zoom_range=0.075)
train_generator = train_datagen.flow(x=x_train,\
y=y_train,\
batch_size=batch_size,\
shuffle=True)
test_datagen = ImageDataGenerator(rescale=1./255)
val_generator = test_datagen.flow(x=x_val,\
y=y_val,\
batch_size=batch_size,\
shuffle=True )
|
Digit Recognizer
|
3,809,504 |
weather=pd.get_dummies(df['weather'],prefix='weather')
df=pd.concat([df,weather],axis=1)
df.head()
weather=pd.get_dummies(test_df['weather'],prefix='weather')
test_df=pd.concat([test_df,weather],axis=1)
test_df.head()<drop_column>
|
model = Sequential()
model.add(Conv2D(filters=96, kernel_size=(5,5), strides=1,input_shape=(dim_x,dim_y,1), activation=tf.nn.relu))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Conv2D(filters=256, kernel_size=(5,5), strides=1, activation=tf.nn.relu))
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=1, activation=tf.nn.relu))
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=1, activation=tf.nn.relu))
model.add(SpatialDropout2D(rate=0.67))
model.add(Conv2D(filters=250, kernel_size=(3,3), strides=1, activation=tf.nn.relu))
model.add(MaxPooling2D(pool_size=2, strides=2))
model.add(Flatten())
model.add(Reshape(( 250,1)))
model.add(AveragePooling1D(pool_size=25,strides=25))
model.add(Reshape(( [10])))
model.add(Activation(tf.nn.softmax))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4), metrics=['accuracy'] )
|
Digit Recognizer
|
3,809,504 |
df.drop(['season','weather'],inplace=True,axis=1)
df.head()
test_df.drop(['season','weather'],inplace=True,axis=1)
test_df.head()<feature_engineering>
|
def learning_schedule(epoch):
if epoch <= 1:
lr = 3e-4
elif epoch <= 10:
lr = 1e-5
elif epoch <= 50:
lr = 3e-6
elif epoch <= 150:
lr = 1e-6
else:
lr = 1e-8
return lr
lrate = LearningRateScheduler(learning_schedule)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=600, verbose=1, mode='auto' )
|
Digit Recognizer
|
3,809,504 |
df["hour"] = [t.hour for t in pd.DatetimeIndex(df.datetime)]
df["day"] = [t.dayofweek for t in pd.DatetimeIndex(df.datetime)]
df["month"] = [t.month for t in pd.DatetimeIndex(df.datetime)]
df['year'] = [t.year for t in pd.DatetimeIndex(df.datetime)]
df['year'] = df['year'].map({2011:0, 2012:1})
df.head()
test_df["hour"] = [t.hour for t in pd.DatetimeIndex(test_df.datetime)]
test_df["day"] = [t.dayofweek for t in pd.DatetimeIndex(test_df.datetime)]
test_df["month"] = [t.month for t in pd.DatetimeIndex(test_df.datetime)]
test_df['year'] = [t.year for t in pd.DatetimeIndex(test_df.datetime)]
test_df['year'] = test_df['year'].map({2011:0, 2012:1})
test_df.head()<drop_column>
|
steps_per_epoch = int(len(y_train)/batch_size)
max_epochs = 4096
history = model.fit_generator(generator=train_generator,\
steps_per_epoch=steps_per_epoch,\
validation_data=val_generator,\
validation_steps=50,\
epochs=max_epochs,\
callbacks=[early, lrate],\
verbose=2 )
|
Digit Recognizer
|
3,809,504 |
df.drop('datetime',axis=1,inplace=True)
df.head()<drop_column>
|
x_test = pd.read_csv('../input/test.csv')
x_test.head()
x_test = np.array(x_test.values)
x_test = x_test / 255.
print("data shape", x_test.shape)
x_test = x_test.reshape(( -1, dim_x,dim_y,1))
|
Digit Recognizer
|
3,809,504 |
df.drop(['casual','registered'],axis=1,inplace=True )<feature_engineering>
|
y_pred = model.predict(x_test )
|
Digit Recognizer
|
3,809,504 |
df.drop(['month'],inplace=True,axis=1)
test_df.drop(['month'],inplace=True,axis=1)
df['holiday'] = df['holiday']
df['workingday'] = df['workingday']<filter>
|
results = np.argmax(y_pred,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False )
|
Digit Recognizer
|
4,996,129 |
df[(df.atemp-df.temp ).abs() >10]<train_model>
|
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(train.shape)
print(test.shape)
n_train_img = train.shape[0]
MAX_LR=3e-3
VALID_PCT=3000/n_train_img
VALID_PCT=.1
VALID_PCT
|
Digit Recognizer
|
4,996,129 |
line_fitter = LinearRegression()
line_fitter.fit(df['temp'].values.reshape(-1,1), df['atemp'].values.reshape(-1,1))<predict_on_test>
|
class ArrayDataset(Dataset):
"Sample numpy array dataset"
def __init__(self, x, y):
self.x, self.y = x, y
self.c = 10
def __len__(self):
return len(self.x)
def __getitem__(self, i):
return self.x[i], self.y[i]
def reshape_and_normalize(arr):
return arr.astype(np.float32 ).reshape([-1, 1, 28, 28])/255.0
def conv2(ni,nf):
return conv_layer(ni,nf,stride=2)
def conv_and_res(ni,nf):
return nn.Sequential(conv2(ni, nf), res_block(nf))
def create_nn(n_channel=1):
model = nn.Sequential(
conv_and_res(n_channel, 8),
conv_and_res(8, 16),
conv_and_res(16, 32),
conv_and_res(32, 16),
conv2(16, 10),
Flatten()
)
return model
def get_preds(learn):
pred, _ = learn.get_preds(ds_type=DatasetType.Test)
print(len(pred))
pred = pd.Series(np.argmax(pred, 1))
print(pred.unique())
return pred
def get_callbacks() :
return [partial(EarlyStoppingCallback,
patience=3
)]
def create_learner(data, model):
learn = Learner(data, model, loss_func = nn.CrossEntropyLoss() ,
metrics=accuracy,
callback_fns=get_callbacks())
return learn
def create_submission(learn, file='learn.csv'):
pred = get_preds(learn)
submission = pd.DataFrame({'ImageId': range(1,1+len(pred)) , 'Label': pred})
submission.to_csv(file, index=False)
return
|
Digit Recognizer
|
4,996,129 |
pred_result = line_fitter.predict(df[(df.atemp-df.temp ).abs() >10]['temp'].values.reshape(-1,1))<find_best_model_class>
|
%%time
data = prepare_dataset(train, test)
learn = create_learner(data, create_nn())
learn.fit_one_cycle(1, max_lr=MAX_LR)
learn.lr_find(end_lr=10)
learn.recorder.plot()
|
Digit Recognizer
|
4,996,129 |
df.columns.to_series().groupby(df.dtypes ).groups
x_train,x_test,y_train,y_test=train_test_split(df.drop('count',axis=1),df['count'],test_size=0.25,random_state=42)
models=[RandomForestRegressor() ]
model_names=['RandomForestRegressor']
rmsle=[]
d={}
for model in range(len(models)) :
clf=models[model]
clf.fit(x_train,y_train)
test_pred=clf.predict(x_test)
rmsle.append(np.sqrt(mean_squared_log_error(test_pred,y_test)))
d={'Modelling Algo':model_names,'RMSLE':rmsle}
d<train_on_grid>
|
def prepare_dataset(train, test):
X = reshape_and_normalize(train.drop('label', axis=1 ).values)
y = train.label.values
train_x, valid_x, train_y, valid_y = train_test_split(X, y,test_size=VALID_PCT)
test_x = reshape_and_normalize(test.values)
train_ds, valid_ds = ArrayDataset(train_x, train_y), ArrayDataset(valid_x, valid_y)
test_ds = ArrayDataset(test_x, [EmptyLabel() ]*len(test_x))
data = ImageDataBunch.create(train_ds, valid_ds,
test_ds,
bs=100, num_workers=1)
print("Train/Valid/Test sizes:", len(train_ds), len(valid_ds), len(test_ds))
return data
|
Digit Recognizer
|
4,996,129 |
no_of_test=[500]
params_dict={'n_estimators':no_of_test,'n_jobs':[-1],'max_features':["auto",'sqrt','log2']}
clf_rf=GridSearchCV(estimator=RandomForestRegressor() ,param_grid=params_dict,scoring='neg_mean_squared_log_error')
clf_rf.fit(x_train,y_train)
pred=clf_rf.predict(x_test)
print(( np.sqrt(mean_squared_log_error(pred,y_test))))<find_best_params>
|
tfms = get_transforms(do_flip=False)
len(tfms)
tfms
|
Digit Recognizer
|
4,996,129 |
clf_rf.best_params_<save_to_csv>
|
%%time
data = ImageDataBunch.from_folder('png/train',
bs=100,
ds_tfms=tfms,
valid_pct=VALID_PCT
)
data.add_test(ImageList.from_df(test_df, '.'))
data.normalize()
data
|
Digit Recognizer
|
4,996,129 |
pred=clf_rf.predict(test_df.drop('datetime',axis=1))
d={'datetime':test['datetime'],'count':pred}
ans=pd.DataFrame(d)
ans.to_csv('answer.csv',index=False )<load_from_csv>
|
data.show_batch(figsize=(7,6))
|
Digit Recognizer
|
4,996,129 |
!unzip ../input/jigsaw-toxic-comment-classification-challenge/train.csv.zip
!unzip ../input/jigsaw-toxic-comment-classification-challenge/test.csv.zip
!unzip ../input/jigsaw-toxic-comment-classification-challenge/test_labels.csv.zip
!unzip ../input/jigsaw-toxic-comment-classification-challenge/sample_submission.csv.zip<define_variables>
|
%%time
learn1 = create_learner(data, create_nn(3))
learn1.fit_one_cycle(1, max_lr=MAX_LR)
learn1.lr_find(end_lr=10)
learn1.recorder.plot()
|
Digit Recognizer
|
4,996,129 |
TRAIN = './train.csv'
TEST = './test.csv'
TEST_LABEL = './test_labels.csv'
SAMPLE = './sample_submission.csv'
EPOCHS = 2
MAX_TOKEN_COUNT = 128
BATCH_SIZE = 32<set_options>
|
learn1.fit_one_cycle(100, max_lr=MAX_LR)
learn1.recorder.plot()
|
Digit Recognizer
|
4,996,129 |
%matplotlib inline
%config InlineBackend.figure_format='retina'
RANDOM_SEED = 42
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
rcParams['figure.figsize'] = 12, 8
pl.seed_everything(RANDOM_SEED )<load_from_csv>
|
%%time
learn2 = cnn_learner(data,
models.resnet50,
metrics=[accuracy], callback_fns=get_callbacks())
learn2.fit_one_cycle(1, max_lr=MAX_LR)
learn2.lr_find(end_lr=10)
learn2.recorder.plot()
|
Digit Recognizer
|
4,996,129 |
df = pd.read_csv(TRAIN)
test_df = pd.read_csv(TEST)
test_label = pd.read_csv(TEST_LABEL)
sample_sub = pd.read_csv(SAMPLE)
df.describe()<split>
|
%%time
learn2.fit_one_cycle(100, max_lr=MAX_LR)
learn2.recorder.plot()
|
Digit Recognizer
|
4,996,129 |
<create_dataframe><EOS>
|
!rm -Rf png
|
Digit Recognizer
|
3,605,350 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<load_pretrained>
|
import numpy as np
import pandas as pd
from IPython.display import Image
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.python import keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, Dropout, MaxPool2D, BatchNormalization, Activation
from tensorflow.python.keras.callbacks import ReduceLROnPlateau
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.optimizers import RMSprop, Adam
from tensorflow.python.keras.activations import relu
from tensorflow.python.keras.utils import plot_model
from tensorflow.python.keras.losses import categorical_crossentropy
|
Digit Recognizer
|
3,605,350 |
BERT_MODEL_NAME = 'bert-base-cased'
tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_NAME )<create_dataframe>
|
digit_data = pd.read_csv('../input/train.csv')
digit_data.head(5 )
|
Digit Recognizer
|
3,605,350 |
train_dataset = ToxicCommentsDataset(
train_df,
tokenizer,
max_token_len=MAX_TOKEN_COUNT
)
val_dataset = ToxicCommentsDataset(
val_df,
tokenizer,
max_token_len=MAX_TOKEN_COUNT
)
<create_dataframe>
|
img_rows, img_cols = 28,28
num_classes = 10
def data_prep_train(raw,val_frac):
num_images = int(raw.shape[0])
y_full = keras.utils.to_categorical(raw.label, num_classes)
X_as_array = raw.values[:,1:]
X_shaped_array = X_as_array.reshape(num_images, img_rows, img_cols, 1)
X_full = X_shaped_array / 255
X_train, X_val, y_train, y_val = train_test_split(X_full, y_full, test_size=val_frac)
return X_train, X_val, y_train, y_val
def data_prep_predict(raw):
num_images = int(raw.shape[0])
X_as_array = raw.values
X_shaped_array = X_as_array.reshape(num_images, img_rows, img_cols, 1)
X = X_shaped_array / 255
return X
|
Digit Recognizer
|
3,605,350 |
test_dataset = ToxicCommentsDataset(
test_df,
tokenizer,
max_token_len=MAX_TOKEN_COUNT,
test=True
)
<load_pretrained>
|
def build_model(layer_sizes=[32, 32, 64, 64, 256], kernel_sizes=[5,5,3,3], activation = 'relu'):
model = Sequential()
model.add(Conv2D(layer_sizes[0], kernel_size=kernel_sizes[0], padding = 'same', input_shape=(img_rows, img_cols, 1)))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(Conv2D(layer_sizes[1], kernel_size=kernel_sizes[1], padding = 'same'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(layer_sizes[2], kernel_size=kernel_sizes[2], padding = 'same'))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(Conv2D(layer_sizes[3], kernel_size=kernel_sizes[3], padding = 'same'))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(layer_sizes[4]))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(Dropout(rate=0.5))
model.add(Dense(num_classes, activation='softmax'))
return model
my_model = build_model()
plot_model(my_model, to_file='my_model.png', show_shapes=True, show_layer_names=True)
Image('my_model.png' )
|
Digit Recognizer
|
3,605,350 |
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False )<set_options>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False, )
|
Digit Recognizer
|
3,605,350 |
gc.collect()<choose_model_class>
|
def train_model(model, optimizer='adam', batch_size=64, epochs=1, verbose=1, callbacks=[]):
model.compile(loss=categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
history = model.fit(datagen.flow(X_train, y_train, batch_size=batch_size),
epochs=epochs,
verbose=verbose,
validation_data=(X_val,y_val),
callbacks=callbacks)
return history
|
Digit Recognizer
|
3,605,350 |
class ToxicCommentTagger(nn.Module):
def __init__(self, n_classes: int, n_training_steps=None, n_warmup_steps=None):
super().__init__()
self.bert = BertModel.from_pretrained(BERT_MODEL_NAME, return_dict=True)
self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes)
self.n_training_steps = n_training_steps
self.n_warmup_steps = n_warmup_steps
self.criterion = nn.BCELoss()
def forward(self, input_ids, attention_mask, labels=None):
output = self.bert(input_ids, attention_mask=attention_mask)
output = self.classifier(output.pooler_output)
output = torch.sigmoid(output)
loss = 0
if labels is not None:
loss = self.criterion(output, labels)
return loss, output<set_options>
|
X_train, X_val, y_train, y_val = data_prep_train(digit_data,0.1)
leaky_relu = lambda x: relu(x, alpha=0.1)
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
lr_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
my_model = build_model(activation=leaky_relu)
history = train_model(my_model, optimizer=optimizer, epochs=40, batch_size = 128, verbose=1, callbacks=[lr_reduction])
plt.figure(figsize=(20,9))
plt.plot(range(20,41),history.history['val_acc'][19:], color='red', label='validation accuracy')
plt.plot(range(20,41),history.history['acc'][19:], color='blue', label='accuracy')
legend = plt.legend(loc='best', shadow=True)
plt.show()
|
Digit Recognizer
|
3,605,350 |
<choose_model_class><EOS>
|
subm_examples = pd.read_csv('../input/test.csv')
X_subm = data_prep_predict(subm_examples)
y_subm = my_model.predict(X_subm)
n_rows = y_subm.shape[0]
y_subm = [np.argmax(y_subm[row,:])for row in range(n_rows)]
output = pd.DataFrame({'ImageId': range(1,n_rows+1), 'Label': y_subm})
output.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
3,946,383 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<define_variables>
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, sampler
from torchvision import transforms
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
from random import shuffle, randint
from PIL import Image
import math
|
Digit Recognizer
|
3,946,383 |
N_EPOCHS = EPOCHS
steps_per_epoch=len(train_df)// BATCH_SIZE
total_training_steps = steps_per_epoch * N_EPOCHS
warmup_steps = total_training_steps // 5
warmup_steps, total_training_steps<choose_model_class>
|
class DigitDataset(Dataset):
def __init__(self, csv_file, root_dir, train=False, transform=None):
self.digit_df = pd.read_csv(root_dir + csv_file)
self.transform = transform
self.train = train
def __len__(self):
return len(self.digit_df)
def __getitem__(self, item):
if self.train:
digit = self.digit_df.iloc[item, 1:].values
digit = digit.astype('float' ).reshape(( 28, 28))
label = self.digit_df.iloc[item, 0]
else:
digit = self.digit_df.iloc[item, :].values
digit = digit.astype('float' ).reshape(( 28, 28))
label = 0
sample = [digit, label]
if self.transform:
sample[0] = self.transform(sample[0])
return sample
|
Digit Recognizer
|
3,946,383 |
optimizer = AdamW(model.parameters() , lr=2e-5)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=total_training_steps
)<train_model>
|
class Regularize(object):
def __init__(self, max_pixel=255):
self.max_pixel = max_pixel
def __call__(self, digit):
assert isinstance(digit, np.ndarray)
digit = digit / self.max_pixel
return digit
class ToTensor(object):
def __call__(self, digit):
assert isinstance(digit, np.ndarray)
digit = digit.reshape(( 1, 28, 28))
digit = torch.from_numpy(digit)
digit = digit.float()
return digit
|
Digit Recognizer
|
3,946,383 |
def train() :
model.train()
total_loss, total_accuracy = 0, 0
avg_loss = 0
total_preds=[]
for step,batch in enumerate(train_dataloader):
if step % 50 == 0 and not step == 0:
print(' Batch {:>5,} of {:>5,}.'.format(step, len(train_dataloader)))
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
labels = batch["labels"].to(device)
model.zero_grad()
loss, outputs = model(input_ids, attention_mask, labels)
total_loss = total_loss + loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters() , 1.0)
optimizer.step()
scheduler.step()
outputs=outputs.detach().cpu().numpy()
total_preds.append(outputs)
avg_loss = total_loss / len(train_dataloader)
print(f"{step}: {avg_loss}")
total_preds = np.concatenate(total_preds, axis=0)
return avg_loss, total_preds<import_modules>
|
data_np = DigitDataset('train.csv', '../input/', train=True)
print("Number of Training Images: ", len(data_np))
plt.imshow(data_np[5][0], cmap='gray')
plt.show()
print("Label for the Image: ", data_np[5][1] )
|
Digit Recognizer
|
3,946,383 |
<train_model>
|
composed_transform = transforms.Compose([Regularize() , ToTensor() ])
data_torch = DigitDataset('train.csv', '../input/', train=True, transform=composed_transform)
dataloader = DataLoader(data_torch,
batch_size=4,
shuffle=True,
num_workers=4)
for i, data in enumerate(dataloader, 0):
digits, labels = data
print("Type of Digits: ", type(digits))
print("Dimension of the Tensor: ", digits.shape)
print("Type of Labels: ", type(labels))
print("Dimension of the Tensor: ", labels.shape)
if i == 0:
break
|
Digit Recognizer
|
3,946,383 |
def evaluate() :
print("
Evaluating...")
model.eval()
total_loss, total_accuracy = 0, 0
total_preds = []
total_labels = []
for step,batch in enumerate(val_dataloader):
if step % 50 == 0 and not step == 0:
print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader)))
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
labels = batch["labels"].to(device)
with torch.no_grad() :
loss, outputs = model(input_ids, attention_mask, labels)
total_loss = total_loss + loss.item()
outputs = outputs.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
total_preds.append(outputs)
total_labels.append(labels)
avg_loss = total_loss / len(val_dataloader)
print(f"{step}: {avg_loss}")
total_preds = np.concatenate(total_preds, axis=0)
total_labels = np.concatenate(total_labels, axis=0)
true = np.array(total_labels)
pred = np.array(total_preds>0.5)
for i, name in enumerate(LABEL_COLUMNS):
try:
print(f"{name} roc_auc {roc_auc_score(true[:, i], pred[:, i])}")
except Exception as e:
print(e)
pass
print(f"Evaluate loss {total_loss / len(val_dataloader)}")
return avg_loss, total_preds, total_labels<train_model>
|
def digits_per_class(digit_df, indices):
assert isinstance(digit_df, pd.DataFrame)
assert isinstance(indices, list)
digit_num = [0 for num in range(10)]
for idx in indices:
label = digit_df.iloc[idx, 0]
digit_num[label] += 1
return digit_num
|
Digit Recognizer
|
3,946,383 |
%%time
best_valid_loss = float('inf')
train_losses=[]
valid_losses=[]
EPOCHS = 2
for epoch in range(EPOCHS):
print('\nEpoch {:} / {:}'.format(epoch + 1, EPOCHS))
train_loss, _ = train()
valid_loss, _, _ = evaluate()
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict() , 'saved_weights.pt')
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')<train_model>
|
digit_class_num = digits_per_class(data_torch.digit_df,
[num for num in range(len(data_torch)) ])
for i, num in enumerate(digit_class_num, 0):
print("Number of Images for Digit ", i, ": ", num)
print("Overall Images: ", sum(digit_class_num))
|
Digit Recognizer
|
3,946,383 |
def test() :
print("
Testing...")
model.eval()
total_loss, total_accuracy = 0, 0
total_preds = []
_ids = []
for step,batch in enumerate(test_dataloader):
if step % 50 == 0 and not step == 0:
print(' Batch {:>5,} of {:>5,}.'.format(step, len(test_dataloader)))
_id = batch["_id"]
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
with torch.no_grad() :
loss, outputs = model(input_ids, attention_mask)
total_loss = total_loss + loss
outputs = outputs
_ids.append(_id)
total_preds.append(outputs)
avg_loss = total_loss / len(test_dataloader)
_ids = np.concatenate(_ids, axis=0)
total_preds = torch.cat(total_preds, axis=0)
results = dict(id=_ids,
predictions = total_preds
)
return avg_loss, total_preds, results<compute_train_metric>
|
def train_validate_split(digit_df, test_ratio=0.2):
assert isinstance(digit_df, pd.DataFrame)
digit_num = len(digit_df)
overall_indices = [num for num in range(digit_num)]
overall_class_num = digits_per_class(digit_df, overall_indices)
test_class_num = [int(num*test_ratio)for num in overall_class_num]
tmp_test_class_num = [0 for num in range(10)]
shuffle(overall_indices)
train_indices = []
val_indices = []
for idx in overall_indices:
tmp_label = digit_df.iloc[idx, 0]
if tmp_test_class_num[tmp_label] < test_class_num[tmp_label]:
val_indices.append(idx)
tmp_test_class_num[tmp_label] += 1
else:
train_indices.append(idx)
return train_indices, val_indices
|
Digit Recognizer
|
3,946,383 |
def evaluate_roc(probs, y_true):
preds = probs
fpr, tpr, threshold = roc_curve(y_true, preds)
roc_auc = auc(fpr, tpr)
print(f'AUC: {roc_auc:.4f}')
y_pred = np.where(preds >= 0.5, 1, 0)
accuracy = accuracy_score(y_true, y_pred)
print(f'Accuracy: {accuracy*100:.2f}%')
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()<compute_train_metric>
|
train_data, val_data = train_validate_split(data_torch.digit_df)
train_class_num = digits_per_class(data_torch.digit_df, train_data)
val_class_num = digits_per_class(data_torch.digit_df, val_data)
for i, num in enumerate(train_class_num, 0):
print("Number of Images for Digit ", i, "- Train: ", num, "Validate: ", val_class_num[i])
print("Train Images: ", sum(train_class_num), "Validate Images: ", sum(val_class_num))
|
Digit Recognizer
|
3,946,383 |
avg_loss, total_preds, total_labels = evaluate()<compute_test_metric>
|
train_sampler = sampler.SubsetRandomSampler(train_data)
train_dataloader = DataLoader(data_torch,
batch_size=4,
shuffle=False,
sampler=train_sampler,
num_workers=4 )
|
Digit Recognizer
|
3,946,383 |
for i, name in enumerate(LABEL_COLUMNS):
print(f"label: {name}")
evaluate_roc(total_preds[:,i]>0.5, total_labels[:,i] )<compute_test_metric>
|
class BasicLeNet(nn.Module):
def __init__(self):
super(BasicLeNet, self ).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 6, 5),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(6, 16, 5),
nn.ReLU(inplace=True),
nn.MaxPool2d(2)
)
self.classifier = nn.Sequential(
nn.Linear(16*4*4, 120),
nn.ReLU(inplace=True),
nn.Linear(120, 84),
nn.ReLU(inplace=True),
nn.Linear(84, 10)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 16*4*4)
x = self.classifier(x)
return x
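The 16*4*4 input size of the classifier follows from 28x28 inputs: 28 -> 24 after the first 5x5 conv, -> 12 after pooling, -> 8 after the second conv, -> 4 after pooling. A quick check (not in the original kernel):
import torch

net = BasicLeNet()
with torch.no_grad():
    features = net.features(torch.zeros(1, 1, 28, 28))
print(features.shape)  # torch.Size([1, 16, 4, 4]), i.e. 16*4*4 = 256 classifier inputs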
|
Digit Recognizer
|
3,946,383 |
avg_test_loss, total_test_preds, sub = test()<create_dataframe>
|
def training(network, criterion, optimizer, epoch_num, test=True):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Start Training with", device, epoch_num, "overall epoch")
network.to(device)
composed_transform = transforms.Compose([Regularize() , ToTensor() ])
digit_dataset = DigitDataset('train.csv', '../input/', train=True, transform=composed_transform)
if test:
train_indices, val_indices = train_validate_split(digit_dataset.digit_df)
train_sampler = sampler.SubsetRandomSampler(train_indices)
val_sampler = sampler.SubsetRandomSampler(val_indices)
train_dataloader = DataLoader(
digit_dataset,
batch_size=32,
shuffle=False,
sampler=train_sampler,
num_workers=4,
pin_memory=True
)
val_dataloader = DataLoader(
digit_dataset,
batch_size=32,
shuffle=False,
sampler=val_sampler,
num_workers=4,
pin_memory=True
)
print("Training with validation, ", "Overall Data:", len(train_indices)+len(val_indices))
print("Training Data:", len(train_indices), "Validate Data:", len(val_indices))
else:
train_dataloader = DataLoader(
digit_dataset,
batch_size=32,
shuffle=True,
num_workers=4,
pin_memory=True
)
val_dataloader = None
print("Training all data, ", "Overall Data:", len(digit_dataset))
batch_num = 0
ita = []
loss_avg = []
val_acc = []
for epoch in range(epoch_num):
running_loss = 0.0
for i, data in enumerate(train_dataloader, 0):
digits, labels = data
digits, labels = digits.to(device), labels.to(device)
optimizer.zero_grad()
outputs = network(digits)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
batch_num += 1
if test == True and i % 500 == 499:
ita.append(batch_num)
loss_avg.append(running_loss/500.)
val_acc.append(validating(network, val_dataloader))
running_loss = 0.
if test:
train_accuracy = validating(network, train_dataloader)
val_accuracy = validating(network, val_dataloader)
print('Training accuracy: %.5f' %(train_accuracy))
print('Validation accuracy: %.5f' %(val_accuracy))
return network, ita, loss_avg, val_acc
|
Digit Recognizer
|
3,946,383 |
D = pd.DataFrame()
D['id'] = sub['id']
D<data_type_conversions>
|
def validating(network, loader):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
correct_num = 0
total_num = 0
for i, data in enumerate(loader, 0):
digits, labels = data
total_num += labels.size(0)
digits, labels = digits.to(device), labels.to(device)
outputs = network(digits)
_, predicted = torch.max(outputs, 1)
correct_num +=(( predicted == labels ).sum().to("cpu")).item()
accuracy = correct_num / total_num
return accuracy
|
Digit Recognizer
|
3,946,383 |
D[LABEL_COLUMNS] =(sub['predictions'].cpu().numpy())
D<save_to_csv>
|
lenet = BasicLeNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(lenet.parameters())
lenet, batch_ita, loss_list, val_acc_list = training(lenet, criterion, optimizer, 30 )
|
Digit Recognizer
|
3,946,383 |
D.to_csv("submission.csv", index=False )<import_modules>
|
class DigitDataset(Dataset):
def __init__(self, csv_file, root_dir, train=False, argument=True, transform=None):
self.digit_df = pd.read_csv(root_dir + csv_file)
self.transform = transform
self.train = train
self.argument = argument
def __len__(self):
if self.argument:
return 2 * len(self.digit_df)
else:
return len(self.digit_df)
def __getitem__(self, item):
if item < len(self.digit_df):
if self.train:
digit = self.digit_df.iloc[item, 1:].values
digit = digit.astype('float' ).reshape(( 28, 28))
label = self.digit_df.iloc[item, 0]
else:
digit = self.digit_df.iloc[item, :].values
digit = digit.astype('float' ).reshape(( 28, 28))
label = 0
else:
assert self.argument and self.train
digit = self.digit_df.iloc[item % len(self.digit_df), 1:].values
digit = digit.astype('float' ).reshape(( 28, 28))
rand_theta =(randint(-20, 20)/ 180)* math.pi
rand_x = randint(-2, 2)
rand_y = randint(-2, 2)
rand_scale = randint(9, 11)* 0.1
digit = digit_argument(digit, rand_theta, [rand_x, rand_y], rand_scale)
label = self.digit_df.iloc[item % len(self.digit_df), 0]
sample = [digit, label]
if self.transform:
sample[0] = self.transform(sample[0])
return sample
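digit_argument is called above but not defined in this extract. A plausible sketch with the same signature (rotation angle in radians, [dx, dy] shift in pixels, zoom factor), built on scipy.ndimage, is shown below; this is an assumption, not the original implementation.
import math
import numpy as np
from scipy import ndimage

def digit_argument(digit, theta, shift, scale):
    # Hypothetical sketch: rotate, translate and zoom a 28x28 digit, keeping the output 28x28.
    out = ndimage.rotate(digit, math.degrees(theta), reshape=False, order=1, mode='constant')
    out = ndimage.shift(out, shift, order=1, mode='constant')
    zoomed = ndimage.zoom(out, scale, order=1)
    canvas = np.zeros_like(digit)
    h = min(zoomed.shape[0], 28)
    w = min(zoomed.shape[1], 28)
    canvas[:h, :w] = zoomed[:h, :w]
    return canvas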
|
Digit Recognizer
|
3,946,383 |
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
from tensorflow import keras
from keras import layers
from keras.callbacks import Callback
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from textblob import TextBlob
from textblob import Word
import re
import string
<load_from_disk>
|
def train_validate_split(digit_df, test_ratio=0.2, argument=True):
assert isinstance(digit_df, pd.DataFrame)
digit_num = len(digit_df)
overall_indices = [num for num in range(digit_num)]
overall_class_num = digits_per_class(digit_df, overall_indices)
test_class_num = [int(num*test_ratio)for num in overall_class_num]
tmp_test_class_num = [0 for num in range(10)]
shuffle(overall_indices)
train_indices = []
val_indices = []
for idx in overall_indices:
tmp_label = digit_df.iloc[idx, 0]
if tmp_test_class_num[tmp_label] < test_class_num[tmp_label]:
val_indices.append(idx)
tmp_test_class_num[tmp_label] += 1
else:
train_indices.append(idx)
if argument:
train_indices.append(idx + digit_num)
return train_indices, val_indices
|
Digit Recognizer
|
3,946,383 |
!unzip -q "/kaggle/input/jigsaw-toxic-comment-classification-challenge/*.zip"
!dir<load_from_csv>
|
class EnhancedLeNet(nn.Module):
def __init__(self):
super(EnhancedLeNet, self ).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 64, 5, padding=2),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 5, padding=2),
nn.ReLU(inplace=True),
nn.BatchNorm2d(64),
nn.MaxPool2d(2),
nn.Conv2d(64, 128, 5, padding=2),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, 5, padding=2),
nn.ReLU(inplace=True),
nn.BatchNorm2d(128),
nn.MaxPool2d(2),
)
self.classifier = nn.Sequential(
nn.Linear(128*7*7, 512),
nn.ReLU(inplace=True),
nn.BatchNorm1d(512),
nn.Linear(512, 10)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 128*7*7)
x = self.classifier(x)
return x
|
Digit Recognizer
|
3,946,383 |
train_data_file = "train.csv"
test_data_file = "test.csv"
submission_file = "sample_submission.csv"
train_data = pd.read_csv(train_data_file)
test_data = pd.read_csv(test_data_file)
submission_result = pd.read_csv(submission_file )<define_variables>
|
lenet = EnhancedLeNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(lenet.parameters())
lenet, batch_ita, loss_list, val_acc_list = training(lenet, criterion, optimizer, 30 )
|
Digit Recognizer
|
3,946,383 |
max_len = 120
embedding_dim = 300
vocabulary_size = 20000
num_tokens = vocabulary_size+1<categorify>
|
def testing(network):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
composed_transform = transforms.Compose([Regularize() , ToTensor() ])
digit_dataset = DigitDataset('test.csv', '../input/', train=False, argument=False, transform=composed_transform)
test_dataloader = DataLoader(
digit_dataset,
batch_size=128,
shuffle=False,
num_workers=4,
pin_memory=True
)
test_results = []
for i, data in enumerate(test_dataloader, 0):
digits, label = data
digits = digits.to(device)
outputs = network(digits)
_, predicted = torch.max(outputs, 1)
test_results += np.int_(predicted.to("cpu" ).numpy().squeeze() ).tolist()
test_df = pd.read_csv("../input/sample_submission.csv")
assert(len(test_df)== len(test_results))
test_df.loc[:, 'Label'] = test_results
test_df.to_csv('test_results.csv', index=False)
print("Test Results for Kaggle Generated..." )
|
Digit Recognizer
|
3,946,383 |
def preprocess(corpus):
printable = set(string.printable)
corpus = ''.join(filter(lambda x: x in printable, corpus))
corpus = corpus.lower()
corpus = re.sub(r"won't", "will not", corpus)
corpus = re.sub(r"can't", "can not", corpus)
corpus = re.sub(r"ain't","is not", corpus)
corpus = re.sub(r"shan't", "shall not", corpus)
corpus = re.sub(r"let's", "let us", corpus)
corpus = re.sub(r"n't", " not", corpus)
corpus = re.sub(r"'re", " are", corpus)
corpus = re.sub(r"'s", " is", corpus)
corpus = re.sub(r"'d", " would", corpus)
corpus = re.sub(r"'ll", " will", corpus)
corpus = re.sub(r"'t", " not", corpus)
corpus = re.sub(r"'ve", " have", corpus)
corpus = re.sub(r"'m", " am", corpus)
corpus = re.sub(r"'", " ", corpus)
correction_list = {"youfuck": "you fuck", \
"fucksex": "fuck sex",\
"bitchbot": "bitch bot",\
"offfuck": "fuck off",\
"donkeysex": "donkey sex",\
"securityfuck": "security fuck",\
"ancestryfuck": "ancestry fuck",\
"turkeyfuck": "turkey fuck",\
"faggotgay": "faggot gay",\
"fuckbot": "fuck bot",\
"assfuckers": "ass fucker",\
"ckckck": "cock",\
"fuckfuck": "fuck",\
"lolol": "lol",\
"pussyfuck": "fuck",\
"gaygay": "gay",\
"haha": "ha",\
"sucksuck": "suck"
}
for old,new in correction_list.items() :
corpus = corpus.replace(old,new)
return corpus<prepare_x_and_y>
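A quick illustration of the cleaning above (example sentence chosen here, not taken from the kernel):
print(preprocess("I WON'T do it, he's gone"))
# -> 'i will not do it, he is gone'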
|
lenet = EnhancedLeNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(lenet.parameters())
lenet, batch_ita, loss_list, val_acc_list = training(lenet, criterion, optimizer, 50, test=False)
testing(lenet )
|
Digit Recognizer
|
4,188,803 |
tokenizer = Tokenizer(num_words = vocabulary_size+1,\
filters='!"
0123456789',\
lower=True, split=' ')
X_train_raw = train_data["comment_text"]
X_test_raw = test_data["comment_text"]
bad_comment_cat = ['toxic', 'severe_toxic', 'obscene', 'threat',\
'insult', 'identity_hate']
Y_train = train_data[bad_comment_cat]<string_transform>
|
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep' )
|
Digit Recognizer
|
4,188,803 |
X_train_raw = X_train_raw.apply(lambda x: preprocess(str(x)))
X_test_raw = X_test_raw.apply(lambda x: preprocess(str(x)))
tokenizer.fit_on_texts(X_train_raw)
tokenizer.fit_on_texts(X_test_raw)
X_train = pad_sequences(tokenizer.texts_to_sequences(X_train_raw),\
maxlen = max_len, truncating = "pre")
X_test = pad_sequences(tokenizer.texts_to_sequences(X_test_raw),\
maxlen = max_len, truncating = "pre")
x_train, x_val, y_train, y_val = train_test_split(X_train,Y_train,train_size=0.9, random_state=199)
print(x_train.shape," ",y_train.shape," ",x_val.shape," ",y_val.shape )<categorify>
|
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
|
Digit Recognizer
|
4,188,803 |
def get_weights(embedding_vectors,embedding_dim):
global num_tokens,tokenizer
embedding_weights = np.zeros(( num_tokens,embedding_dim))
misses = 0
for word, i in tokenizer.word_index.items() :
vector = embedding_vectors.get(word)
if i>=num_tokens :
break
elif vector is not None:
embedding_weights[i] = vector
else:
if len(word)<20:
word = Word(word)
word = word.spellcheck() [0][0]
vector = embedding_vectors.get(str(word))
if vector is not None:
embedding_weights[i] = vector
else:
misses +=1
else:
misses +=1
print(f"The number of missed words is {misses}")
return embedding_weights<string_transform>
|
X_train = X_train / 255.0
test = test / 255.0
|
Digit Recognizer
|
4,188,803 |
embedding_vectors_fasttext = {}
with open("/kaggle/input/fasttext-crawl-300d-2m/crawl-300d-2M.vec","r")as file:
file.readline()
for line in file:
word , vector = line.split(maxsplit=1)
vector = np.fromstring(vector,"float32",sep=" ")
embedding_vectors_fasttext[word] = vector<load_pretrained>
|
Y_train = to_categorical(Y_train, num_classes = 10 )
|
Digit Recognizer
|
4,188,803 |
embedding_weights_fasttext = get_weights(embedding_vectors_fasttext,embedding_dim=300 )<feature_engineering>
|
random_seed = 2
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed )
|
Digit Recognizer
|
4,188,803 |
embedding_vectors_glove = {}
with open("/kaggle/input/glove6b/glove.6B.300d.txt","r")as file:
for line in file:
word , vector = line.split(maxsplit=1)
vector = np.fromstring(vector,"float32",sep=" ")
embedding_vectors_glove[word] = vector<load_pretrained>
|
model_test = Sequential()
model_test.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model_test.add(MaxPool2D(pool_size=(2,2)))
model_test.add(Dropout(0.5))
model_test.add(Flatten())
model_test.add(Dense(256, activation = "relu"))
model_test.add(Dropout(0.5))
model_test.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
4,188,803 |
embedding_weights_glove = get_weights(embedding_vectors_glove,embedding_dim=300 )<choose_model_class>
|
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model_test.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
epochs = 2
batch_size = 86
|
Digit Recognizer
|
4,188,803 |
def GRU_model_glove() :
global max_len,num_tokens,embedding_weights_glove
inputs = layers.Input(shape=(max_len,))
x = layers.Embedding(input_dim=num_tokens,\
output_dim=embedding_dim,\
embeddings_initializer=keras.initializers.Constant(embedding_weights_glove),\
trainable=True )(inputs)
x = layers.SpatialDropout1D(0.3 )(x)
forward_layer = layers.GRU(42,return_sequences=True)
backward_layer = layers.GRU(42,activation="relu",dropout=0.1,return_sequences=True,go_backwards=True)
x = layers.Bidirectional(forward_layer,backward_layer=backward_layer )(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(units=6,activation='sigmoid' )(x)
model = keras.models.Model(inputs=inputs, outputs=outputs, name="GRU_model_glove")
model.compile(optimizer=tf.optimizers.Adam() ,\
loss=tf.losses.BinaryCrossentropy() ,\
metrics=['AUC'])
return model
GRU_model_glove = GRU_model_glove()
GRU_model_glove.summary()<train_model>
|
history = model_test.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs,
validation_data =(X_val, Y_val), verbose = 2 )
|
Digit Recognizer
|
4,188,803 |
history = GRU_model_glove.fit(x_train, y_train, epochs=2,\
batch_size=128, validation_data=(x_val,y_val))<choose_model_class>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|