kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
---|---|---|---|
11,129,033 |
X_test = X_test[cols_list]<predict_on_test>
|
X_train = train.iloc[:,1:].values.astype('float32')
y_train = train['label'].values.astype('int32')
test = test.values.astype('float32')
del train
|
Digit Recognizer
|
11,129,033 |
y_test_predict=xgb_best.predict(X_test)
y_test_final = rev_trans(y_test_predict)
df_test['count']=y_test_final<save_to_csv>
|
X_train = X_train / 255.0
test = test / 255.0
|
Digit Recognizer
|
11,129,033 |
df_test[['datetime','count']].to_csv('/kaggle/working/submissionNoParams.csv', index=False )<set_options>
|
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
test = test.reshape(test.shape[0], 28, 28, 1 )
|
Digit Recognizer
|
11,129,033 |
%matplotlib inline
<load_from_csv>
|
y_train = to_categorical(y_train)
y_train
|
Digit Recognizer
|
11,129,033 |
train_df = pd.read_csv('../input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('../input/bike-sharing-demand/test.csv')
<count_missing_values>
|
np.random.seed(2 )
|
Digit Recognizer
|
11,129,033 |
print(train_df.isnull().values.any(), '\n', test_df.isnull().values.any())<create_dataframe>
|
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size = 0.1, random_state = 2 )
|
Digit Recognizer
|
11,129,033 |
test = pd.DataFrame(test_df)
test<create_dataframe>
|
model = Sequential()
model.add(Conv2D(filters = 64, kernel_size =(5,5), padding = 'Same', activation = 'relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 64, kernel_size =(5,5), padding = 'Same', activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters = 128, kernel_size =(3,3), padding = 'Same', activation = 'relu'))
model.add(Conv2D(filters = 128, kernel_size =(3,3), padding = 'Same', activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation = 'softmax'))
|
Digit Recognizer
|
11,129,033 |
train = pd.DataFrame(train_df)
train<feature_engineering>
|
model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'] )
|
Digit Recognizer
|
11,129,033 |
train['datetime'] = pd.to_datetime(train['datetime'], format = '%Y-%m-%dT%H:%M:%S')
train['year'] = train['datetime'].dt.year
train['month'] = train['datetime'].dt.month
train['day'] = train['datetime'].dt.day
train['hour'] = train['datetime'].dt.hour
train<drop_column>
|
epochs = 50
batch_size = 64
|
Digit Recognizer
|
11,129,033 |
train = train_df.drop('datetime' , axis=1)
train<sort_values>
|
datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
11,129,033 |
train[["season", "count"]].groupby(['season'], as_index=False ).sum().sort_values(by='count', ascending=False )<sort_values>
|
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_test,y_test),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size )
|
Digit Recognizer
|
11,129,033 |
train[["holiday", "count"]].groupby(['holiday'], as_index=False ).sum().sort_values(by='count', ascending=False )<sort_values>
|
results = model.predict(test)
|
Digit Recognizer
|
11,129,033 |
train[["workingday", "count"]].groupby(['workingday'], as_index=False ).sum().sort_values(by='count', ascending=False )<feature_engineering>
|
output = pd.concat([pd.Series(range(1,28001), name = 'ImageId'), results], axis = 1)
output
|
Digit Recognizer
|
11,129,033 |
<create_dataframe><EOS>
|
output.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,055,227 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<feature_engineering>
|
%matplotlib inline
|
Digit Recognizer
|
11,055,227 |
c=[]
for i in test['hour']:
if i>= 6 or i<= 1 :
c.append("Day")
else:
c.append("Night")
test['DayorNight']=c
test['DayorNight']=pd.factorize(test['DayorNight'])[0].reshape(-1, 1)
c=[]
test = test.drop('datetime' , axis =1)
test<feature_engineering>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
11,055,227 |
c=[]
for i in train['hour']:
if i>= 6 or i<= 1 :
c.append("Day")
else:
c.append("Night")
train['DayorNight']=c
train['DayorNight']=pd.factorize(train['DayorNight'])[0].reshape(-1, 1)
c=[]
train.head(2 )<drop_column>
|
X_train = train.iloc[:,1:].values.astype('float32')
y_train = train['label'].values.astype('int32')
test = test.values.astype('float32')
del train
|
Digit Recognizer
|
11,055,227 |
x = train
x =x.drop('count',axis=1)
x<split>
|
X_train = X_train / 255.0
test = test / 255.0
|
Digit Recognizer
|
11,055,227 |
x_train, x_val, y_train, y_val = train_test_split(x , y, train_size=0.8, test_size=0.2, random_state = 0 )<import_modules>
|
y_train = to_categorical(y_train)
y_train
|
Digit Recognizer
|
11,055,227 |
import xgboost as xgb
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_log_error
<import_modules>
|
np.random.seed(2 )
|
Digit Recognizer
|
11,055,227 |
<compute_train_metric>
|
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size = 0.0001, random_state = 2 )
|
Digit Recognizer
|
11,055,227 |
xgbr = xgb.XGBRegressor(verbosity=0)
xgbr.fit(x_train, y_train)
score_xgbr = xgbr.score(x_train, y_train)
print("Training score: ", score_xgbr)
scores_xgbr = cross_val_score(xgbr, x_train, y_train,cv=10)
print("Mean cross-validation score: %.2f" % scores_xgbr.mean())
y_pred_xgbr = xgbr.predict(x_val)
print('Validation score' , xgbr.score(x_val , y_val))
rmsle_xgbr = mean_squared_log_error(y_pred_xgbr ,y_val)
print("RMSLE: %.2f" % rmsle_xgbr)
<compute_train_metric>
|
model = Sequential()
model.add(Conv2D(filters = 64, kernel_size =(5,5), padding = 'Same', activation = 'relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 64, kernel_size =(5,5), padding = 'Same', activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters = 128, kernel_size =(3,3), padding = 'Same', activation = 'relu'))
model.add(Conv2D(filters = 128, kernel_size =(3,3), padding = 'Same', activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation = 'softmax'))
|
Digit Recognizer
|
11,055,227 |
<predict_on_test>
|
model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'] )
|
Digit Recognizer
|
11,055,227 |
pred =np.round(np.expm1(xgbr.predict(test)) ).astype(int)
pred = pd.DataFrame({"datetime": test_df["datetime"],"count": np.fix(pred)})
pred.shape<set_options>
|
epochs = 38
batch_size = 64
|
Digit Recognizer
|
11,055,227 |
plt.style.use('bmh')
%matplotlib inline
<load_from_csv>
|
datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
11,055,227 |
train_df=pd.read_csv('../input/bike-sharing-demand/train.csv')
test_df=pd.read_csv('../input/bike-sharing-demand/test.csv')
display(train_df.head() )<count_duplicates>
|
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_test,y_test),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size )
|
Digit Recognizer
|
11,055,227 |
train_df.duplicated().sum()
<count_missing_values>
|
results = model.predict(test)
|
Digit Recognizer
|
11,055,227 |
print(train_df.isnull().sum())
print(test_df.isnull().sum() )<feature_engineering>
|
output = pd.concat([pd.Series(range(1,28001), name = 'ImageId'), results], axis = 1)
output
|
Digit Recognizer
|
11,055,227 |
<feature_engineering><EOS>
|
output.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
3,829,911 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<drop_column>
|
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep' )
|
Digit Recognizer
|
3,829,911 |
train_df.drop(columns = ['second','Minute'],inplace = True)
test_df.drop(columns = ['second','Minute'],inplace = True )<rename_columns>
|
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv")
submit = pd.read_csv(".. /input/sample_submission.csv" )
|
Digit Recognizer
|
3,829,911 |
train_df = train_df.set_index('datetime')
test_df = test_df.set_index('datetime')
test_df_ID = test_df.index
train_df.head()<drop_column>
|
x_train = train.iloc[:,1:]
y_train = train.iloc[:,0]
|
Digit Recognizer
|
3,829,911 |
casual_df = train_df.drop(['registered','count'],axis = 1)
casual_df.head()<drop_column>
|
x_train=x_train/255.0
test=test/255.0
|
Digit Recognizer
|
3,829,911 |
registered_df = train_df.drop(['casual','count'],axis = 1)
registered_df.head()<feature_engineering>
|
x_train = x_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1 )
|
Digit Recognizer
|
3,829,911 |
registered_df['rushHours'] = registered_df['Hour'].isin([8,17,18])
<feature_engineering>
|
y_train=to_categorical(y_train,num_classes=10)
print(x_train.shape,y_train.shape,test.shape )
|
Digit Recognizer
|
3,829,911 |
registered_df['registered']=np.log1p(registered_df['registered'])
registered_df['windspeed']=np.log1p(registered_df['windspeed'])
<feature_engineering>
|
num_classes = y_train.shape[1]
num_pixels = x_train.shape[1]
|
Digit Recognizer
|
3,829,911 |
casual_df['casual']=np.log1p(casual_df['casual'])
casual_df['windspeed']=np.log1p(casual_df['windspeed'])
<data_type_conversions>
|
seed=7
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.10, random_state=seed )
|
Digit Recognizer
|
3,829,911 |
registered_df['rushHours'] = pd.factorize(registered_df['rushHours'])[0].reshape(-1, 1)
registered_df['weekEnd'] = pd.factorize(registered_df['weekEnd'])[0].reshape(-1, 1)
casual_df['weekEnd'] = pd.factorize(casual_df['weekEnd'])[0].reshape(-1, 1)
<split>
|
def cnn_model() :
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.20))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.20))
model.add(Flatten())
model.add(Dense(128, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(128, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
model.compile(optimizer = 'adam' , loss = "categorical_crossentropy", metrics=["accuracy"])
return model
model=cnn_model()
|
Digit Recognizer
|
3,829,911 |
Y_registered=registered_df.registered
registered_df.drop(columns=['registered','atemp'],inplace = True)
X_train, X_valid, y_train, y_valid = train_test_split(registered_df,Y_registered, train_size=0.8, test_size=0.2,random_state=0 )<train_model>
|
datagen = ImageDataGenerator(
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1)
datagen.fit(x_train )
|
Digit Recognizer
|
3,829,911 |
def get_best_model(X_train, X_valid, y_train, y_valid):
estimators=[('et',ExtraTreesRegressor()),('hgr', HistGradientBoostingRegressor())]
models=[RandomForestRegressor() ,AdaBoostRegressor() ,BaggingRegressor() ,SVR() ,LinearRegression() ,DecisionTreeRegressor() ,ExtraTreesRegressor() , HistGradientBoostingRegressor() ,VotingRegressor(estimators=estimators)]
model_names=['RandomForestRegressor','AdaBoostRegressor','BaggingRegressor','SVR','LinearRegression','DecisionTreeRegressor','ExtraTreesRegressor',' HistGradientBoostingRegressor','VotingRegressor']
rmsle=[]
d={}
for model in range(len(models)) :
clf=models[model]
clf.fit(X_train,y_train)
print("model_name : ",model_names[model])
print(clf.get_params())
test_pred=clf.predict(X_valid)
rmsle.append(np.sqrt(mean_squared_log_error(abs(test_pred),(y_valid))))
d={'Modelling Algo':model_names,'RMSLE':rmsle}
rmsle_frame=pd.DataFrame(d)
print(f'{rmsle_frame}\n______________________________________________________________________________________')
sns.factorplot(y='Modelling Algo',x='RMSLE',data=rmsle_frame,kind='bar',size=5,aspect=2)
plt.show()
<train_model>
|
history = model.fit_generator(datagen.flow(x_train,y_train, batch_size=batch_size),
epochs = epochs, validation_data =(x_test,y_test),
verbose = 1, steps_per_epoch=x_train.shape[0], callbacks=callbacks_list )
|
Digit Recognizer
|
3,829,911 |
get_best_model(X_train, X_valid, y_train, y_valid )<compute_test_metric>
|
submit.Label =model.predict_classes(test )
|
Digit Recognizer
|
3,829,911 |
<train_on_grid><EOS>
|
submit.head()
submit.to_csv('submit.csv',index=False )
|
Digit Recognizer
|
7,280,484 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<predict_on_test>
|
print("Tensorflow DL Version: " + tf.__version__)
print("Setup Completed" )
|
Digit Recognizer
|
7,280,484 |
pred_registered = best_HistGradientAlgo_registered.predict(X_valid)
print(rmsle(pred_registered,y_valid))<split>
|
train_file = ".. /input/digit-recognizer/train.csv"
predict_file = ".. /input/digit-recognizer/test.csv"
submission_file = ".. /input/digit-recognizer/sample_submission.csv"
train_data = pd.read_csv(train_file, sep=',')
predict_data = pd.read_csv(predict_file, sep=',')
print("Files Preparation Completed" )
|
Digit Recognizer
|
7,280,484 |
Y_casual=casual_df.casual
casual_df.drop(columns=['casual','atemp'],inplace = True)
X_train, X_valid, y_train, y_valid = train_test_split(casual_df,Y_casual, train_size=0.8, test_size=0.2,random_state=0 )<train_model>
|
print('**************************Train File Preliminary Investigation**************************')
print('1.Train File Shape:', train_data.shape)
missing_val_count_by_column =(train_data.isnull().sum())
print('2.Train File Missing Valu:', missing_val_count_by_column[missing_val_count_by_column > 0])
print(" Missing Values in Train File: {}".format(train_data.isna().any().any()))
print('3.Train File Variables Information:')
print(train_data.info())
print('4.Train File Variables Unique Number:')
print(train_data.nunique())
print(' ')
print('**************************Prediction File Preliminary Investigation**************************')
print('1.Test File Shape:', predict_data.shape)
missing_val_count_by_column =(predict_data.isnull().sum())
print('2.Test File Missing Value:', missing_val_count_by_column[missing_val_count_by_column > 0])
print(" Missing Values in Test File: {}".format(predict_data.isna().any().any()))
print('3.Test File Variables Information:')
print(predict_data.info())
print('4.Test File Variables Unique Number:')
print(predict_data.nunique() )
|
Digit Recognizer
|
7,280,484 |
get_best_model(X_train, X_valid, y_train, y_valid )<train_on_grid>
|
def label_conversion(data, num_classes = 10):
label_array = np.array(data, dtype='uint8')
label_array = to_categorical(label_array,num_classes=num_classes)
return label_array
def images_conversion(data, img_row = 28, imag_col = 28):
image_array = np.array(data, dtype='uint8')
image_array = np.reshape(image_array,(len(image_array), img_row, imag_col, 1))
image_array = image_array *(1./ 255)- 0.5
image_array = np.reshape(image_array,(len(image_array), img_row, imag_col, 1))
return image_array
print("Function Established Completed" )
|
Digit Recognizer
|
7,280,484 |
HistGradientAlgo_casual = HistGradientBoostingRegressor()
param = {
'max_iter':[i for i in range(115,118)],
'max_depth' : [i for i in range(13,18)],
'max_leaf_nodes':[25]
}
gridSearch_HistGradientAlgo_casual=GridSearchCV(HistGradientAlgo_casual,param,scoring=myScorer,cv=5,verbose=3)
gridSearch_HistGradientAlgo_casual.fit(X_train, y_train)
best_HistGradientAlgo_casual=gridSearch_HistGradientAlgo_casual.best_estimator_
bestHistGradientAlgo_testScore_casual=best_HistGradientAlgo_casual.score(X_train, y_train )<predict_on_test>
|
y = label_conversion(train_data[output_list[0]], num_classes = num_classes)
X = images_conversion(train_data.drop(output_list[0], axis=1), img_row = img_rows, imag_col = img_cols)
X_predict = images_conversion(predict_data, img_row = img_rows, imag_col = img_cols)
print("Conversion Completed" )
|
Digit Recognizer
|
7,280,484 |
pred_casual = best_HistGradientAlgo_casual.predict(X_valid)
print(rmsle(pred_casual,y_valid))<predict_on_test>
|
print("Training Data: {}
Labels: {}".format(X, y))
|
Digit Recognizer
|
7,280,484 |
test_df['windspeed']=np.log1p(test_df['windspeed'])
test_df['rushHours'] = test_df['Hour'].isin([8,17,18])
test_df['rushHours'] = pd.factorize(test_df['rushHours'])[0].reshape(-1, 1)
test_df['weekEnd'] = pd.factorize(test_df['weekEnd'])[0].reshape(-1, 1)
test_df.drop(columns=['atemp'],inplace = True)
pred_casual = np.round(np.expm1(best_HistGradientAlgo_casual.predict(test_df.drop('rushHours',axis = 1)))).astype(int)
pred_registered = np.round(np.expm1(best_HistGradientAlgo_registered.predict(test_df)) ).astype(int)
<create_dataframe>
|
print("Prediction Data: {}
".format(X_predict))
|
Digit Recognizer
|
7,280,484 |
pred_registered = pd.DataFrame(pred_registered,columns = ['count'])
pred_registered<create_dataframe>
|
data_aug = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
print("Data Augmentation Setting Completed" )
|
Digit Recognizer
|
7,280,484 |
pred_casual = pd.DataFrame(pred_casual,columns = ['count'])
pred_casual<prepare_output>
|
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
print("Data Preparation Completed" )
|
Digit Recognizer
|
7,280,484 |
predictions = pd.DataFrame({'datetime':test_df_ID})
predictions['count'] = pred_registered['count'] + pred_casual['count']<save_to_csv>
|
l_model_name = 'lenet5_model'
v_model_name = 'vgg_model'
batch_size = 64
num_classes = 10
epoch_num = 6
dropout_rate = 0.5
oz = keras.optimizers.Adam(lr=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-08,
decay=1e-4,
amsgrad=False)
sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
print("Parameters Setting Completed" )
|
Digit Recognizer
|
7,280,484 |
predictions.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!" )<import_modules>
|
lenet5_model_history = lenet5_model.fit_generator(data_aug.flow(X_train, y_train, batch_size=batch_size),
epochs = epoch_num,
validation_data =(X_test, y_test),
verbose = 2,
steps_per_epoch=X_train.shape[0],
callbacks=[l_tb_callback,
l_lr_callback,
l_ck_callback,
l_best_callback])
lenet5_model.save(l_model_name+'.h5')
del lenet5_model
print("Training Completed!" )
|
Digit Recognizer
|
7,280,484 |
from IPython.display import Image<define_variables>
|
vgg_model_history = vgg_model.fit_generator(data_aug.flow(X_train, y_train, batch_size=batch_size),
epochs = epoch_num,
validation_data =(X_test, y_test),
verbose = 2,
steps_per_epoch=X_train.shape[0],
callbacks=[v_tb_callback,
v_lr_callback,
v_ck_callback,
v_best_callback])
vgg_model.save(v_model_name+'.h5')
del vgg_model
print("Training Completed!" )
|
Digit Recognizer
|
7,280,484 |
Image(filename=".. /input/sf-picture/sf1.jpg" )<set_options>
|
def best_model(histories, key='categorical_crossentropy'):
ini_flag = bool(1)
best_model = None
best_score = 0
for name, history in histories:
val_score_record = history['val_'+key]
for score in val_score_record:
if ini_flag:
best_score = score
best_model = name
ini_flag = bool(0)
if best_score > score:
best_score = score
best_model = name
return best_model
best_model_name = best_model([(l_model_name, lenet5_model_history.history),
(v_model_name, vgg_model_history.history)])
print(best_model_name )
|
Digit Recognizer
|
7,280,484 |
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
sns.set_style("white")
rcParams['figure.figsize'] =(8,4)
<load_from_csv>
|
Digit Recognizer
|
|
7,280,484 |
df = pd.read_csv(".. /input/sf-crime/train.csv.zip",dtype={"X":np.float32,"Y":np.float32} )<count_duplicates>
|
load_model = keras.models.load_model(best_model_name+'.h5')
print("Check Model:")
load_model.evaluate(X_test, y_test)
sample_submission = pd.read_csv(submission_file)
submission_id = sample_submission["ImageId"]
submission = pd.DataFrame({
"ImageId": submission_id,
"Label": np.argmax(load_model.predict(X_predict), axis=1)
})
submission.to_csv('submission.csv', index=False)
print("Submission File Produced Completed" )
|
Digit Recognizer
|
1,811,546 |
print(df.duplicated(keep=False ).value_counts())
df = df.drop_duplicates()<data_type_conversions>
|
train = pd.read_csv('../input/train.csv')
labels = train.iloc[:,0].values.astype('int32')
X_train =(train.iloc[:,1:].values ).astype('float32')
X_test =(pd.read_csv('../input/test.csv').values).astype('float32')
X_train = X_train.reshape(-1,28,28,1)
X_test = X_test.reshape(-1,28,28,1)
y_train = tf.keras.utils.to_categorical(labels)
print("Check data")
print(labels)
print(X_train[0].shape)
print(y_train )
|
Digit Recognizer
|
1,811,546 |
def convert_dataframe(df):
df["Dates"] = pd.to_datetime(df["Dates"],infer_datetime_format=True)
df['Date'] = df['Dates'].dt.date
df["Year"] = df["Dates"].dt.year.astype(np.int32)
df["Month"] = df["Dates"].dt.month.astype(np.int32)
df["Day"] = df["Dates"].dt.day.astype(np.int32)
df["Hour"] = df["Dates"].dt.hour.astype(np.int32)
df["Minute"] = df["Dates"].dt.minute.astype(np.int32)
df["Day_week_numeric"] = df["Dates"].dt.dayofweek.astype(np.int32)
df["Weekend"]= np.where(( df["Day_week_numeric"] >= 4)&(df["Day_week_numeric"] <=6),1,0)
df["count_days"] =(df['Date'] - df['Date'].min() ).apply(lambda x: x.days)
df["Block"] = df.Address.str.contains("Block" ).astype(np.int32)
df = df.drop(["Date","Address"],axis=1)
return df<create_dataframe>
|
( train_imagesRaw, train_labelsRaw),(test_imagesRaw, test_labelsRaw)= mnist.load_data()
|
Digit Recognizer
|
1,811,546 |
df_date = convert_dataframe(df )<categorify>
|
X_train_keras = train_imagesRaw.reshape(-1,28,28,1)
X_test_keras = test_imagesRaw.reshape(-1,28,28,1)
print("X_train_keras",X_train_keras.shape)
print("X_test_keras",X_test_keras.shape)
train_labels_keras = tf.keras.utils.to_categorical(train_labelsRaw)
test_labels_keras = tf.keras.utils.to_categorical(test_labelsRaw)
print("train_labels_keras ",train_labels_keras.shape)
print("test_labels_keras ", test_labels_keras.shape )
|
Digit Recognizer
|
1,811,546 |
label_cat = LabelEncoder()
df_date["Category_encode"] = label_cat.fit_transform(df_date.Category)
label_dist = LabelEncoder()
df_date["PdDistric_encode"] = label_dist.fit_transform(df_date.PdDistrict)
<filter>
|
train_images = np.concatenate(( X_train_keras,X_train,X_test_keras), axis=0)
print("new Concatenated train_images ", train_images.shape)
print("_"*50)
train_labels = np.concatenate(( train_labels_keras,y_train,test_labels_keras), axis=0)
print("new Concatenated train_labels ", train_labels.shape )
|
Digit Recognizer
|
1,811,546 |
df_outliers = df_date.loc[df_date.Y < 90.].copy()<train_model>
|
scale = np.max(train_images)
train_images /= scale
X_test /= scale
print("Max: {}".format(scale))
|
Digit Recognizer
|
1,811,546 |
X = df_outliers.drop(["Dates","Category","Descript","DayOfWeek","PdDistrict","Resolution","Category_encode"],axis=1 ).copy()
y = df_outliers["Category_encode"]
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.25, random_state = 21)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
kmeans = KMeans(n_clusters=6,random_state=0 ).fit(X_train)
<prepare_output>
|
input_size = train_images.shape
n_logits = train_labels.shape[1]
print("Input: {}".format(input_size))
print("Output: {}".format(n_logits))
|
Digit Recognizer
|
1,811,546 |
X_train_df = pd.DataFrame(X_train)
X_val_df = pd.DataFrame(X_val)
X_train_df["Kmean"] = kmeans.labels_
X_val_df["Kmean"] = kmeans.predict(X_val )<train_model>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 30
batch_size = 512
X_train, X_val, Y_train, Yval = train_test_split(train_images, train_labels, train_size = 0.90)
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
1,811,546 |
classifier = RandomForestClassifier(n_jobs = -1,random_state =50,max_depth=10,max_features="auto",min_samples_split=4)
classifier.fit(X_train_df, y_train )<predict_on_test>
|
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3),padding='same', activation='relu', input_shape=input_size[1:]))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=None))
model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(n_logits, activation='softmax'))
optimizer = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
model.summary()
|
Digit Recognizer
|
1,811,546 |
predict_proba = classifier.predict_proba(X_val_df)
log_loss(y_val,predict_proba )<load_from_csv>
|
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Yval),
verbose = 1, steps_per_epoch=X_train.shape[0] )
|
Digit Recognizer
|
1,811,546 |
<categorify><EOS>
|
predictions = model.predict_classes(X_test, verbose=0)
print(predictions)
pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) , "Label": predictions} ).to_csv("preds.csv", index=False, header=True)
|
Digit Recognizer
|
6,583,149 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<normalization>
|
!pip install keras-tuner
|
Digit Recognizer
|
6,583,149 |
test_data_scaler = scaler.transform(test_data_final )<predict_on_test>
|
train = pd.read_csv('../input/digit-recognizer/train.csv')
labels = train.iloc[:,0].values.astype('int32')
X_train =(train.iloc[:,1:].values ).astype('float32')
X_test =(pd.read_csv('../input/digit-recognizer/test.csv').values).astype('float32')
X_train = X_train.reshape(-1,28,28,1)
X_test = X_test.reshape(-1,28,28,1)
y_train = tf.keras.utils.to_categorical(labels)
print("Check data")
print(labels)
print(X_train[0].shape)
print(y_train )
|
Digit Recognizer
|
6,583,149 |
test_data_final = pd.DataFrame(test_data_scaler)
test_data_final["Kmean"] = kmeans.predict(test_data_final)
<predict_on_test>
|
( train_imagesRaw, train_labelsRaw),(test_imagesRaw, test_labelsRaw)= mnist.load_data()
|
Digit Recognizer
|
6,583,149 |
test_data_pred_proba = classifier.predict_proba(test_data_final)
keys = label_cat.classes_
<prepare_output>
|
X_train_keras = train_imagesRaw.reshape(-1,28,28,1)
X_test_keras = test_imagesRaw.reshape(-1,28,28,1)
print("X_train_keras",X_train_keras.shape)
print("X_test_keras",X_test_keras.shape)
train_labels_keras = tf.keras.utils.to_categorical(train_labelsRaw)
test_labels_keras = tf.keras.utils.to_categorical(test_labelsRaw)
print("train_labels_keras ",train_labels_keras.shape)
print("test_labels_keras ", test_labels_keras.shape )
|
Digit Recognizer
|
6,583,149 |
result = pd.DataFrame(data=test_data_pred_proba,columns=keys)
result.head(3 )<save_to_csv>
|
train_images = np.concatenate(( X_train_keras,X_train,X_test_keras), axis=0)
print("new Concatenated train_images ", train_images.shape)
print("_"*50)
train_labels = np.concatenate(( train_labels_keras,y_train,test_labels_keras), axis=0)
print("new Concatenated train_labels ", train_labels.shape )
|
Digit Recognizer
|
6,583,149 |
result.to_csv(path_or_buf="classifier_sf.csv",index=True, index_label = 'Id' )<load_from_csv>
|
scale = np.max(train_images)
train_images /= scale
X_test /= scale
print("Max: {}".format(scale))
|
Digit Recognizer
|
6,583,149 |
train = pd.read_csv(".. /input/titanic/train.csv", index_col='PassengerId')
test = pd.read_csv('.. /input/titanic/test.csv', index_col='PassengerId')
train.head(10 )<sort_values>
|
X_train, X_val, y_train, y_val = train_test_split(train_images, train_labels, test_size=0.10 )
|
Digit Recognizer
|
6,583,149 |
train.corr().abs() ['Survived'].sort_values(ascending=False )<count_missing_values>
|
input_size = X_train.shape
n_logits = y_train.shape[1]
print("Input: {}".format(input_size))
print("Output: {}".format(n_logits))
|
Digit Recognizer
|
6,583,149 |
print(train.isnull().sum())
print('\nThe observation ratio of missing Cabin values is', round(train['Cabin'].isnull().sum()/len(train), 2))
print('The observation ratio of missing Age values is', round(train['Age'].isnull().sum()/len(train), 2))
print('\n')
print(test.isnull().sum())
print('\nThe observation ratio of missing Cabin values is', round(test['Cabin'].isnull().sum()/len(test), 2))
print('The observation ratio of missing Age values is', round(test['Age'].isnull().sum()/len(test), 2))<prepare_output>
|
def build_model(hp):
num_layers = hp.Int('num_layers', min_value=2, max_value=16, step=2)
lr = hp.Choice('learning_rate', [1e-3, 5e-4])
inputs = layers.Input(shape=(28, 28, 1))
x = inputs
for idx in range(num_layers):
idx = str(idx)
filters = hp.Int('filters_' + idx, 32, 256, step=32, default=64)
x = layers.Conv2D(filters=filters, kernel_size=3, padding='same',
activation='relu' )(x)
if x.shape[1] >= 8:
pool_type = hp.Choice('pool_' + idx, values=['max', 'avg'])
if pool_type == 'max':
x = layers.MaxPooling2D(2 )(x)
elif pool_type == 'avg':
x = layers.AveragePooling2D(2 )(x)
x = layers.Flatten()(x)
x = layers.Dense(256, activation='relu' )(x)
x = layers.Dense(256, activation='relu' )(x)
x = layers.Dense(256, activation='relu' )(x)
x = layers.Dropout(0.5 )(x)
outputs = layers.Dense(n_logits, activation='softmax' )(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer=Adam(lr),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
|
Digit Recognizer
|
6,583,149 |
train_copy = train.copy()
train_copy['Cabin'] = train_copy['Cabin'].apply(lambda x: 0 if pd.isnull(x)else 1 )<sort_values>
|
tuner = RandomSearch(
build_model,
objective='val_accuracy',
max_trials=8,
executions_per_trial=3,
directory='my_dir',
project_name='mnist')
tuner.search_space_summary()
|
Digit Recognizer
|
6,583,149 |
train.corr().abs() ['Age'].sort_values(ascending=False )<train_model>
|
tuner.search(X_train, y_train,
epochs=30,
validation_data=(X_val, y_val))
|
Digit Recognizer
|
6,583,149 |
<prepare_output><EOS>
|
predictions_vector = model.predict(X_test, verbose=0)
predictions = np.argmax(predictions_vector,axis=1)
pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) , "Label": predictions} ).to_csv("preds.csv", index=False, header=True )
|
Digit Recognizer
|
7,204,871 |
<feature_engineering><EOS>
|
import pandas as pd
|
Digit Recognizer
|
7,204,871 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<feature_engineering>
|
import pandas as pd
|
Digit Recognizer
|
7,204,871 |
class feature_engineering(BaseEstimator, TransformerMixin):
def __init__(self, columns=None):
self.columns = columns
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, **transform_params):
X['Title'] = X.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip()
X['Title'] = X['Title'].apply(lambda x: x if x in ['Mrs', 'Master', 'Miss', 'Dr','Mr','Rev' ] else 'else')
X['Family_Size'] = X['SibSp']+X['Parch'] + 1
X = X.drop(columns=['SibSp', 'Parch'])
X['has_cabin'] = X['Cabin'].apply(lambda x: 0 if pd.isnull(x)else 1)
return X<categorify>
|
true_test = pd.read_csv(".. /input/mnist-in-csv/mnist_test.csv")
true_train = pd.read_csv(".. /input/mnist-in-csv/mnist_train.csv")
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv")
given_test = pd.read_csv(".. /input/digit-recognizer/test.csv")
given_train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
|
Digit Recognizer
|
7,204,871 |
cat_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")) ,
("cat_encoder", OneHotEncoder()),
] )<feature_engineering>
|
cols = given_test.columns
given_test['dataset'] = 'test'
given_train['dataset'] = 'train'
|
Digit Recognizer
|
7,204,871 |
num_attribs = ["Age", "Fare","Family_Size"]
cat_attribs = [ "Sex", "Embarked","Pclass","Title","has_cabin"]
columns_trans = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", cat_pipeline, cat_attribs),
] )<drop_column>
|
given_dataset = pd.concat([given_train.drop('label', axis=1), given_test] ).reset_index()
true_mnist = pd.concat([true_train, true_test] ).reset_index(drop=True)
labels = true_mnist['label'].values
true_mnist.drop('label', axis=1, inplace=True)
true_mnist.columns = cols
|
Digit Recognizer
|
7,204,871 |
full_pipeline = Pipeline([
('age_inferer', age_inferer(['Age', 'Pclass'])) ,
('feature_engineering', feature_engineering()),
('ColumnTransformer', columns_trans),
] )<prepare_x_and_y>
|
true_idx = true_mnist.sort_values(by=list(true_mnist.columns)).index
dataset_from = given_dataset.sort_values(by=list(true_mnist.columns)) ['dataset'].values
original_idx = given_dataset.sort_values(by=list(true_mnist.columns)) ['index'].values
|
Digit Recognizer
|
7,204,871 |
y_train = train["Survived"]
train = train.drop(columns=["Survived"])
x_train = full_pipeline.fit_transform(train)
<prepare_x_and_y>
|
for i in range(len(true_idx)) :
if dataset_from[i] == 'test':
sample_submission.loc[original_idx[i], 'Label'] = labels[true_idx[i]]
|
Digit Recognizer
|
7,204,871 |
<compute_test_metric><EOS>
|
sample_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
2,217,921 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<train_model>
|
%matplotlib notebook
|
Digit Recognizer
|
2,217,921 |
final_classifier = SVC(probability=True)
final_classifier.fit(x_train, y_train)
x_test = full_pipeline.transform(test)
result = final_classifier.predict(x_test)
<save_to_csv>
|
dataRawTrain = pd.read_csv('../input/train.csv')
dataRawTest = pd.read_csv('../input/test.csv')
dataTrain = np.array(dataRawTrain.iloc[:,1:] ).astype('uint8')
dataTrain = dataTrain.reshape(-1, 1, 28, 28)
targetTrain = np.array(dataRawTrain.iloc[:,:1])
zz = np.zeros([len(targetTrain), 10])
for i, d in enumerate(targetTrain):
zz[i][d.squeeze() ] = 0.66
targetTrain = zz
dataTest = np.array(dataRawTest.iloc[:,:] ).astype('uint8')
dataTest = dataTest.reshape(-1, 1, 28, 28)
for i in range(1):
plt.matshow(dataTrain[i][0], cmap='rainbow')
plt.suptitle('class: {}'.format(targetTrain[i]))
for i in range(1):
plt.matshow(dataTest[i][0], cmap='rainbow')
plt.show()
|
Digit Recognizer
|
2,217,921 |
submission = pd.DataFrame({'PassengerId':test.index,'Survived':result})
submission.to_csv('submissionRF_optim_param.csv',index=False )<set_options>
|
class NetMNIST2(nn.Module):
flag = False
def __init__(self):
super(NetMNIST2, self ).__init__()
self.conv0 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1, bias=True)
self.relu0 = nn.PReLU()
self.pool0 = nn.MaxPool2d(2)
self.drop0 = nn.Dropout2d()
self.conv1p = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.relu1p = nn.PReLU()
self.drop1p = nn.Dropout2d()
self.conv1 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=True)
self.relu1 = nn.PReLU()
self.pool1 = nn.MaxPool2d(2)
self.drop1 = nn.Dropout2d()
self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.relu2 = nn.PReLU()
self.pool2 = nn.MaxPool2d(2)
self.drop2 = nn.Dropout2d()
self.fc1 = nn.Linear(32 * 6 * 6, 10)
self.fc1Prelu = nn.PReLU()
self.fc2 = nn.Linear(10, 10)
def forward(self, x):
x = self.conv0(x)
x = self.relu0(x)
x = self.pool0(x)
x = self.drop0(x)
x = self.conv1p(x)
x = self.relu1p(x)
x = self.drop1p(x)
x = self.conv1(x)
x = self.relu1(x)
x = self.pool1(x)
x = self.drop1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.pool2(x)
x = self.drop2(x)
x = x.view(-1, 128*3*3)
x = self.fc1(x)
x = self.fc1Prelu(x)
x = torch.nn.functional.dropout(x)
x = self.fc2(x)
x = F.softmax(x, dim=1)
return x
|
Digit Recognizer
|
2,217,921 |
%matplotlib inline
style = "<style>svg{width: 70% !important; height: 60% !important;} </style>"
HTML(style )<load_from_csv>
|
use_cuda = True
device = torch.device("cuda" if use_cuda else "cpu")
net = NetMNIST2().to(device)
print(net)
optimizer = optim.Adam(net.parameters() , lr=1e-4, weight_decay=1e-5)
criterion = F.mse_loss
def trainEpoch(e):
net.train()
for i,(data, target)in enumerate(loaderTrain):
dataCUDA, targetCUDA = Variable(data ).to(device), Variable(target.float() ).to(device)
optimizer.zero_grad()
outModel = net(dataCUDA)
loss = F.mse_loss(outModel, targetCUDA)
loss.backward()
optimizer.step()
return loss
def testLoss() :
net.eval()
fit = 0.0
for i,(data, target)in enumerate(loaderTest):
dataCUDA, targetCUDA = Variable(data ).to(device), Variable(target.float() ).to(device)
outModel = net(dataCUDA)
pred = outModel.data.max(1)[1]
targetMaxIndex = targetCUDA.data.max(1)[1]
fit += pred.eq(targetMaxIndex ).cpu().sum()
acc = float(fit.cpu())/ float(len(loaderTest.dataset))
return acc
lossProgress = []
for e in range(5):
lossTrain = trainEpoch(e)
accTest = testLoss()
lossProgress.append(accTest)
print('Test epoch: {} acc: {}'.format(e, accTest))
print('Train epoch: {} loss: {}'.format(e, lossTrain))
plt.plot(lossProgress)
|
Digit Recognizer
|
2,217,921 |
<count_missing_values><EOS>
|
packSize = 10
inputLen = dataTestT.size(0)
indexInput = np.arange(1, inputLen + 1)
result = np.zeros([inputLen],dtype=int)
print(inputLen / packSize)
for i in range(int(inputLen / packSize)) :
dataCUDA = Variable(dataTestT[i * packSize:i*packSize+packSize].cuda())
outModel = net(dataCUDA)
result[i * packSize:i*packSize+packSize] = outModel.data.max(1)[1].cpu().numpy().squeeze()
result=pd.DataFrame({'ImageId':indexInput, 'Label':result})
result.to_csv("submission.csv",index=False)
|
Digit Recognizer
|
8,959,201 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<prepare_x_and_y>
|
plt.style.use('ggplot')
|
Digit Recognizer
|
8,959,201 |
train_X = titanic_data.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis = 1)
test_X = titanic_data_test.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis = 1)
train_y = titanic_data.Survived<data_type_conversions>
|
train_df = pd.read_csv(".. /input/digit-recognizer/train.csv")
test_df = pd.read_csv(".. /input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
8,959,201 |
train_X = train_X.fillna({'Age': train_X.Age.median() , 'Fare': train_X.Fare.median() })
test_X = test_X.fillna({'Age': test_X.Age.median() , 'Fare': test_X.Fare.median() } )<categorify>
|
X =(train_df.iloc[:,1:].values ).astype('float32')
Y = train_df.iloc[:,0].values.astype('int32')
test = test_df.values.astype('float32' )
|
Digit Recognizer
|
8,959,201 |
train_X = pd.get_dummies(train_X)
test_X = pd.get_dummies(test_X )<count_missing_values>
|
X = X.reshape(X.shape[0], 28, 28, 1)
Y = to_categorical(Y)
test = test.reshape(test.shape[0], 28, 28, 1 )
|
Digit Recognizer
|
8,959,201 |
test_X.isnull().sum()<choose_model_class>
|
X = X.astype("float32")/ 255
test = test.astype("float32")/ 255
|
Digit Recognizer
|
8,959,201 |
clf_rf = RandomForestClassifier(n_jobs=-1,
criterion='entropy',
min_samples_split=2,
min_samples_leaf=1)
parameters = {'n_estimators': range(3,40), 'max_depth': range(1,10)}
grid_search_cv_clf = GridSearchCV(clf_rf, parameters, cv = 5 )<train_on_grid>
|
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size = 0.1, random_state=0 )
|
Digit Recognizer
|
8,959,201 |
grid_search_cv_clf.fit(train_X,train_y )<find_best_params>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
8,959,201 |
best_clf = grid_search_cv_clf.best_estimator_
grid_search_cv_clf.best_params_<compute_test_metric>
|
model = Sequential()
model.add(Conv2D(32, kernel_size = 4, activation="relu", input_shape=(28,28,1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size = 3, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size = 2, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))
model.compile(loss="mean_squared_error", optimizer="rmsprop", metrics=['accuracy'])
model.summary()
|
Digit Recognizer
|
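The rows above follow a fixed four-column schema: kernel_id identifies the source Kaggle kernel, prompt holds the notebook context ending in an operation tag such as <predict_on_test>, completion holds the code cell that realizes that tag, and comp_name names the competition. As a minimal sketch of how such records could be consumed, assuming a local Parquet copy of the table (the path kernels.parquet is hypothetical):

```python
import pandas as pd

# Hypothetical local copy of the table above; the real file name may differ.
df = pd.read_parquet("kernels.parquet")

# Each row pairs a prompt (kernel context ending in a tag such as
# <predict_on_test>) with the completion cell that implements that tag.
for _, row in df.head(3).iterrows():
    tag = row["prompt"].rsplit("<", 1)[-1].rstrip(">")  # trailing operation tag
    print(f"kernel {row['kernel_id']} | {row['comp_name']} | <{tag}>")
    print(row["completion"][:200])  # preview of the target code cell
```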